signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
|---|---|
def _get_fixed_params(im):
    """Build the registration parameters the user has no influence on.

    These are derived purely from the input image (dimensionality and
    pixel type); non-array inputs yield an empty parameter set.
    """
    params = Parameters()
    if not isinstance(im, np.ndarray):
        return params
    # Input/output dimensionality follows the image.
    params.FixedImageDimension = im.ndim
    params.MovingImageDimension = im.ndim
    # Always write the result so the output can be verified.
    params.WriteResultImage = True
    # Map numpy dtype to the ITK pixel-type suffix, e.g. "..._SHORT" -> "short".
    itk_name = DTYPE_NP2ITK[im.dtype.name]
    params.ResultImagePixelType = itk_name.split('_')[-1].lower()
    params.ResultImageFormat = "mhd"
    return params
|
def add(self, docs, boost=None, fieldUpdates=None, commit=None,
        softCommit=False, commitWithin=None, waitFlush=None,
        waitSearcher=None, overwrite=None, handler='update'):
    """Adds or updates documents.

    Requires ``docs``, which is a list of dictionaries. Each key is the
    field name and each value is the value to index.

    Optionally accepts ``commit``. Default is ``None``. None signals to
    use the server-side default.
    Optionally accepts ``softCommit``. Default is ``False``.
    Optionally accepts ``boost``. Default is ``None``.
    Optionally accepts ``fieldUpdates``. Default is ``None``.
    Optionally accepts ``commitWithin``. Default is ``None``.
    Optionally accepts ``waitFlush``. Default is ``None``.
    Optionally accepts ``waitSearcher``. Default is ``None``.
    Optionally accepts ``overwrite``. Default is ``None``.

    Usage::

        solr.add([
            {"id": "doc_1", "title": "A test document"},
            {"id": "doc_2", "title": "The Banana: Tasty or Dangerous?"},
        ])
    """
    start_time = time.time()
    self.log.debug("Starting to build add request...")
    message = ElementTree.Element('add')
    if commitWithin:
        # XML attribute values must be strings; callers commonly pass
        # commitWithin as an int, so coerce explicitly.
        message.set('commitWithin', str(commitWithin))
    for doc in docs:
        el = self._build_doc(doc, boost=boost, fieldUpdates=fieldUpdates)
        message.append(el)
    # tostring() returns a bytestring; convert back to unicode.
    m = ElementTree.tostring(message, encoding='utf-8')
    m = force_unicode(m)
    end_time = time.time()
    self.log.debug("Built add request of %s docs in %0.2f seconds.",
                   len(message), end_time - start_time)
    return self._update(m, commit=commit, softCommit=softCommit,
                        waitFlush=waitFlush, waitSearcher=waitSearcher,
                        overwrite=overwrite, handler=handler)
|
def destroy(self):
    '''Tear down the syndic minion.

    We borrowed the local client's poller, so give it back (via the
    parent-class teardown) before it's destroyed; then drop the local
    client reference and stop event forwarding if it was started.
    '''
    super(Syndic, self).destroy()
    if hasattr(self, 'local'):
        del self.local
    if hasattr(self, 'forward_events'):
        self.forward_events.stop()
|
def get_text(self):
    """Return extended progress bar text."""
    unit = to_reasonable_unit(self.done, self.units)
    scaled_current = round(self.current / unit['multiplier'], 2)
    pct = int(self.current * 100 / self.done)
    return '{0:.2f} of {1:.2f} {2} ({3}%)'.format(
        scaled_current, unit['val'], unit['label'], pct)
|
def _to_dict ( self ) :
"""Return a json dictionary representing this model ."""
|
_dict = { }
if hasattr ( self , 'custom_language_model' ) and self . custom_language_model is not None :
_dict [ 'custom_language_model' ] = self . custom_language_model
if hasattr ( self , 'speaker_labels' ) and self . speaker_labels is not None :
_dict [ 'speaker_labels' ] = self . speaker_labels
return _dict
|
def get_scalar(mesh, name, preference='cell', info=False, err=False):
    """Searches both point and cell data for an array.

    Parameters
    ----------
    name : str
        The name of the array to get the range.
    preference : str, optional
        When scalars is specified, this is the preferred scalar type to
        search for in the dataset. Must be either ``'point'`` or ``'cell'``.
    info : bool
        Return info about the scalar rather than the array itself.
    err : bool
        Boolean to control whether to throw an error if array is not present.
    """
    parr = point_scalar(mesh, name)
    carr = cell_scalar(mesh, name)
    # Normalize a string preference to the matching field constant.
    if isinstance(preference, str):
        if preference in ('cell', 'c', 'cells'):
            preference = CELL_DATA_FIELD
        elif preference in ('point', 'p', 'points'):
            preference = POINT_DATA_FIELD
        else:
            raise RuntimeError('Data field ({}) not supported.'.format(preference))
    # When the name exists in both point and cell data, preference wins.
    if parr is not None and carr is not None:
        if preference == CELL_DATA_FIELD:
            return (carr, CELL_DATA_FIELD) if info else carr
        if preference == POINT_DATA_FIELD:
            return (parr, POINT_DATA_FIELD) if info else parr
        raise RuntimeError('Data field ({}) not supported.'.format(preference))
    # Otherwise pick whichever one exists (point first, then cell).
    if parr is not None:
        arr, field = parr, 0
    elif carr is not None:
        arr, field = carr, 1
    else:
        arr, field = None, None
        if err:
            raise KeyError('Data scalar ({}) not present in this dataset.'.format(name))
    return (arr, field) if info else arr
|
def print_lines(process: Popen) -> None:
    """Let a subprocess :func:`communicate`, then write both its ``stdout``
    and its ``stderr`` to our ``stdout``."""
    out, err = process.communicate()
    # stdout first, then stderr, matching the original output order.
    for stream in (out, err):
        if stream:
            for line in stream.decode("utf-8").splitlines():
                print(line)
|
def read_parquet(cls, path, engine, columns, **kwargs):
    """Load a parquet object from the file path, returning a DataFrame.

    Ray DataFrame only supports pyarrow engine for now.

    Args:
        path: The filepath of the parquet file.
            We only support local files for now.
        engine: Ray only support pyarrow reader.
            This argument doesn't do anything for now.
        columns: Subset of column names to read; when falsy, all
            non-index columns found in the file metadata are used.
        kwargs: Pass into parquet's read_pandas function.

    Notes:
        ParquetFile API is used. Please refer to the documentation here
        https://arrow.apache.org/docs/python/parquet.html
    """
    from pyarrow.parquet import ParquetFile
    # Fall back to the default (non-remote) implementation when no
    # remote task has been registered on this class.
    if cls.read_parquet_remote_task is None:
        return super(RayIO, cls).read_parquet(path, engine, columns, **kwargs)
    if not columns:
        pf = ParquetFile(path)
        # Drop pandas index columns stored in the file's schema.
        columns = [name for name in pf.metadata.schema.names
                   if not PQ_INDEX_REGEX.match(name)]
    num_partitions = cls.frame_mgr_cls._compute_num_partitions()
    num_splits = min(len(columns), num_partitions)
    # Each item in this list will be a list of column names of the original df
    column_splits = (len(columns) // num_partitions
                     if len(columns) % num_partitions == 0
                     else len(columns) // num_partitions + 1)
    col_partitions = [columns[i:i + column_splits]
                      for i in range(0, len(columns), column_splits)]
    # Each item in this list will be a list of columns of original df
    # partitioned to smaller pieces along rows.
    # We need to transpose the oids array to fit our schema.
    blk_partitions = np.array([
        cls.read_parquet_remote_task._remote(
            args=(path, cols, num_splits, kwargs),
            num_return_vals=num_splits + 1,
        ) for cols in col_partitions
    ]).T
    # The last row of object ids carries the index length of each split
    # (hence num_splits + 1 return values per task).
    remote_partitions = np.array([[cls.frame_partition_cls(obj) for obj in row]
                                  for row in blk_partitions[:-1]])
    index_len = ray.get(blk_partitions[-1][0])
    index = pandas.RangeIndex(index_len)
    new_query_compiler = cls.query_compiler_cls(
        cls.frame_mgr_cls(remote_partitions), index, columns)
    return new_query_compiler
|
def quattodcm(quat):
    """Convert quaternion to DCM.

    Converts a quaternion to the equivalent rotation matrix, or
    direction cosine matrix.

    Parameters:
        quat - (4,) numpy array
            Quaternion defined in terms of a vector and a scalar part,
            ordered [x y z w]. The vector is related to the eigen axis
            and equivalent in both reference frames.

    Returns:
        dcm - (3,3) numpy array
            Numpy rotation matrix which defines a rotation from the b
            to a frame.
    """
    vec = quat[0:3]
    scalar = quat[-1]
    dcm = ((scalar ** 2 - np.inner(vec, vec)) * np.eye(3, 3)
           + 2 * np.outer(vec, vec)
           + 2 * scalar * hat_map(vec))
    return dcm
|
def _planck_deriv ( self , lam , Teff ) :
"""Computes the derivative of the monochromatic blackbody intensity using
the Planck function .
@ lam : wavelength in m
@ Teff : effective temperature in K
Returns : the derivative of monochromatic blackbody intensity"""
|
expterm = np . exp ( self . h * self . c / lam / self . k / Teff )
return 2 * self . h * self . c * self . c / self . k / Teff / lam ** 7 * ( expterm - 1 ) ** - 2 * ( self . h * self . c * expterm - 5 * lam * self . k * Teff * ( expterm - 1 ) )
|
def check_time_event(oqparam, occupancy_periods):
    """Check the `time_event` parameter in the datastore, by comparing
    with the periods found in the exposure."""
    time_event = oqparam.time_event
    if not time_event:
        return
    if time_event not in occupancy_periods:
        raise ValueError(
            'time_event is %s in %s, but the exposure contains %s' %
            (time_event, oqparam.inputs['job_ini'],
             ', '.join(occupancy_periods)))
|
def predict(self, X):
    """Apply transforms to the data, and predict with the final estimator.

    Parameters
    ----------
    X : iterable
        Data to predict on. Must fulfill input requirements of first step
        of the pipeline.

    Returns
    -------
    yp : array-like
        Predicted transformed target
    """
    transformed, _, _ = self._transform(X)
    return self._final_estimator.predict(transformed)
|
def asfreq(obj, freq, method=None, how=None, normalize=False, fill_value=None):
    """Utility frequency conversion method for Series/DataFrame.

    Dispatches on the index type:
    - PeriodIndex: converted with ``PeriodIndex.asfreq``; ``method`` is
      not supported and ``how`` defaults to 'E'.
    - empty index: only the index's ``freq`` attribute is updated.
    - otherwise: a new date range spanning the data is built and the
      object is reindexed onto it, using ``method``/``fill_value``.
    """
    if isinstance(obj.index, PeriodIndex):
        if method is not None:
            raise NotImplementedError("'method' argument is not supported")
        if how is None:
            how = 'E'
        new_obj = obj.copy()
        new_obj.index = obj.index.asfreq(freq, how=how)
    elif len(obj.index) == 0:
        # Nothing to reindex; just attach the requested frequency.
        new_obj = obj.copy()
        new_obj.index = obj.index._shallow_copy(freq=to_offset(freq))
    else:
        dti = date_range(obj.index[0], obj.index[-1], freq=freq)
        dti.name = obj.index.name
        new_obj = obj.reindex(dti, method=method, fill_value=fill_value)
    if normalize:
        # Drop the time component of each timestamp, keeping dates only.
        new_obj.index = new_obj.index.normalize()
    return new_obj
|
def query_metric_definition(self, metric_type, metric_id):
    """Query definition of a single metric id.

    :param metric_type: MetricType to be matched (required)
    :param metric_id: Exact string matching metric id
    """
    target_url = self._get_metrics_single_url(metric_type, metric_id)
    return self._get(target_url)
|
def restart(self):
    """Restart application with same args as it was started.

    Does **not** return.
    """
    if self.verbose:
        print("Restarting {} {} ...".format(sys.executable, sys.argv))
    # exec* replaces the current process image, so nothing after this runs.
    args = [sys.executable] + sys.argv
    os.execl(sys.executable, *args)
|
def _apply_cen_to_throats ( self , p_cen , t_cen , t_norm , men_cen ) :
r'''Take the pore center and throat center and work out which way
the throat normal is pointing relative to the vector between centers .
Offset the meniscus center along the throat vector in the correct
direction'''
|
v = p_cen - t_cen
sign = np . sign ( np . sum ( v * t_norm , axis = 1 ) )
c3 = np . vstack ( ( men_cen * sign , men_cen * sign , men_cen * sign ) ) . T
coords = t_cen + c3 * t_norm
return coords
|
def parse_options(metadata):
    """Parse argument options."""
    arg_parser = argparse.ArgumentParser(
        description='%(prog)s usage:', prog=__prog__)
    setoption(arg_parser, metadata=metadata)
    return arg_parser
|
def urlsplit(name):
    """Parse :param:`name` into (netloc, port, ssl)."""
    if not isinstance(name, string_types):
        name = str(name)
    # urlparse needs an explicit scheme to populate netloc/port.
    if not name.startswith(('http://', 'https://')):
        name = 'http://' + name
    parsed = urlparse(name)
    is_ssl = parsed.scheme == 'https'
    if is_ssl and parsed.port is None:
        return parsed.netloc, 443, True
    host = parsed.netloc.rsplit(':')[0]
    return host, parsed.port or 80, is_ssl
|
def _addDBParam ( self , name , value ) :
"""Adds a database parameter"""
|
if name [ - 4 : ] == '__OP' :
return self . _setComparasionOperator ( name [ : - 4 ] , value )
if name [ - 3 : ] == '.op' :
return self . _setComparasionOperator ( name [ : - 3 ] , value )
if name . find ( '__' ) != - 1 :
import re
name = name . replace ( '__' , '::' )
elif name . find ( '.' ) != - 1 :
name = name . replace ( '.' , '::' )
self . _dbParams . append ( [ name , value ] )
|
def convert_dict_to_datetime(obj_map):
    """Converts dictionary representations of datetime back to datetime obj."""
    converted = {}
    for key, value in obj_map.items():
        if isinstance(value, dict):
            if 'tzinfo' in value:
                # A dict carrying a 'tzinfo' key is a serialized datetime.
                converted[key] = datetime.datetime(**value)
            else:
                converted[key] = convert_dict_to_datetime(value)
        elif isinstance(value, list):
            # Recurse into dict items; leave everything else untouched.
            converted[key] = [
                convert_dict_to_datetime(item) if isinstance(item, dict) else item
                for item in value
            ]
        else:
            converted[key] = value
    return converted
|
def wrap(self, string, width):
    """Wrap lines according to width, inserting newlines where needed.

    Returns the wrapped text, or ``False`` when the input string is
    empty or the width is not positive.
    """
    if not string or width <= 0:
        logging.error("invalid string: %s or width: %s" % (string, width))
        return False
    tmp = ""
    for line in string.splitlines():
        # Short lines pass through unchanged (single trailing newline).
        if len(line) <= width:
            tmp += line + "\n"
            continue
        cur = 0
        length = len(line)
        while cur + width < length:
            # Break at the last separator within the next `width` chars,
            # replacing the character at the break point with a newline.
            # NOTE(review): if `self.sep` is absent from the window,
            # rfind() returns -1 and `cur` may stop advancing, looping
            # forever — confirm inputs always contain the separator.
            cur = line[:cur + width].rfind(self.sep) + len(self.sep) - 1
            line = line[:cur] + "\n" + line[cur + 1:]
        # Wrapped lines get a blank line after them.
        tmp += line + "\n\n"
    return tmp
|
def initialize_path(self, path_num=None):
    """Inits producer for next path, i.e. sets current state to initial state."""
    for producer in self.producers:
        producer.initialize_path(path_num)
    # Reseed so each (seed, path) pair yields a reproducible stream.
    self.random.seed(hash(self.seed) + hash(path_num))
|
def fsl2antstransform(matrix, reference, moving):
    """Convert an FSL linear transform to an antsrTransform.

    ANTsR function: `fsl2antsrtransform`

    Arguments
    ---------
    matrix : ndarray/list
        4x4 matrix of transform parameters
    reference : ANTsImage
        target image
    moving : ANTsImage
        moving image

    Returns
    -------
    ANTsTransform

    Examples
    --------
    >>> import ants
    >>> import numpy as np
    >>> fslmat = np.zeros((4,4))
    >>> np.fill_diagonal(fslmat, 1)
    >>> img = ants.image_read(ants.get_ants_data('ch2'))
    >>> tx = ants.fsl2antstransform(fslmat, img, img)
    """
    if reference.dimension != 3:
        raise ValueError('reference image must be 3 dimensions')
    # The underlying library function operates on float images only.
    if reference.pixeltype != 'float':
        reference = reference.clone('float')
    if moving.pixeltype != 'float':
        moving = moving.clone('float')
    libfn = utils.get_lib_fn('fsl2antstransformF3')
    tx_ptr = libfn(list(matrix), reference.pointer, moving.pointer, 1)
    return tio.ANTsTransform(precision='float',
                             dimension=reference.dimension,
                             transform_type='AffineTransform',
                             pointer=tx_ptr)
|
def shell_out_ignore_exitcode(cmd, stderr=STDOUT, cwd=None):
    """Same as shell_out but doesn't raise if the cmd exits badly."""
    try:
        return shell_out(cmd, stderr=stderr, cwd=cwd)
    except CalledProcessError as exc:
        # A bad exit code still produced output worth returning.
        return _clean_output(exc.output)
|
def compute_positions(cls, screen_width, line):
    """Compute the relative position of the fields on a given line.

    Args:
        screen_width (int): the width of the screen
        line (mpdlcd.display_fields.Field list): the list of fields on
            the line

    Returns:
        ((int, mpdlcd.display_fields.Field) list): the positions of
            fields, as (position, field) tuples.

    Raises:
        FormatError: if the line contains more than one flexible field,
            or is too long for the screen size.
    """
    left = 1                  # first column index (1-based)
    right = screen_width + 1  # one past the last column
    flexible = None           # the at-most-one flexible field
    # Accumulate fixed widths before the flexible field into `left`,
    # and widths after it into `right`.
    for field in line:
        if field.is_flexible():
            if flexible:
                raise FormatError('There can be only one flexible field per line.')
            flexible = field
        elif not flexible:
            left += field.width
        else:
            right -= field.width
    # Space left over for the (optional) flexible field.
    available = right - left
    if available <= 0:
        raise FormatError("Too much data for screen width")
    if flexible:
        # NOTE(review): unreachable in practice — `available <= 0` has
        # already raised above, so `available >= 1` always holds here.
        if available < 1:
            raise FormatError("Not enough space to display flexible field %s" % flexible.name)
        flexible.width = available
    positions = []
    left = 1
    for field in line:
        positions.append((left, field))
        left += field.width
    logger.debug('Positions are %r', positions)
    return positions
|
def birth(self):
    '''Create the individual (compute the spline curve).'''
    # Fit and evaluate the real and imaginary parts separately, since
    # splrep/splev only handle real-valued data.
    for part in ('real', 'imag'):
        tck = scipy.interpolate.splrep(self.x, getattr(self.y, part))
        setattr(self.y_int, part, scipy.interpolate.splev(self.x_int, tck))
|
def _code_contents(code, docstring=None):
    """Return the signature contents of a code object.

    By providing direct access to the code object of the function,
    Python makes this extremely easy. Hooray!

    Unfortunately, older versions of Python include line number
    indications in the compiled byte code. Boo! So we remove the line
    number byte codes to prevent recompilations from moving a Python
    function.

    See:
      - https://docs.python.org/2/library/inspect.html
      - http://python-reference.readthedocs.io/en/latest/docs/code/index.html
    for info on what each co_ variable provides.

    The signature is as follows (should be byte/chars):
    co_argcount, len(co_varnames), len(co_cellvars), len(co_freevars),
    (comma separated signature for each object in co_consts),
    (comma separated signature for each object in co_names),
    (the bytecode with line number bytecodes removed from co_code)

    co_argcount - number of positional arguments (including those with defaults).
    co_varnames - tuple of local variable names (starting with argument names).
    co_cellvars - tuple of names of locals referenced by nested functions.
    co_freevars - tuple of names of free variables.
    co_consts - tuple of literals used by the bytecode.
    co_names - tuple of names used by the bytecode.
    co_code - string representing the sequence of bytecode instructions.
    """
    # contents = []
    # The code contents depends on the number of local variables
    # but not their actual names.
    contents = bytearray("{}, {}".format(code.co_argcount, len(code.co_varnames)), 'utf-8')
    contents.extend(b", ")
    contents.extend(bytearray(str(len(code.co_cellvars)), 'utf-8'))
    contents.extend(b", ")
    contents.extend(bytearray(str(len(code.co_freevars)), 'utf-8'))
    # The code contents depends on any constants accessed by the
    # function. Note that we have to call _object_contents on each
    # constant because the code object of nested functions can
    # show up among the constants. The first const is skipped —
    # presumably the docstring, so doc-only edits don't change the
    # signature (TODO confirm).
    z = [_object_contents(cc) for cc in code.co_consts[1:]]
    contents.extend(b',(')
    contents.extend(bytearray(',', 'utf-8').join(z))
    contents.extend(b')')
    # The code contents depends on the variable names used to access
    # global variables, as changing the variable name changes the
    # variable actually accessed and therefore changes the function
    # result.
    z = [bytearray(_object_contents(cc)) for cc in code.co_names]
    contents.extend(b',(')
    contents.extend(bytearray(',', 'utf-8').join(z))
    contents.extend(b')')
    # The code contents depends on its actual code!!!
    contents.extend(b',(')
    contents.extend(code.co_code)
    contents.extend(b')')
    return contents
|
def add_dependency(id=None, name=None, dependency_id=None, dependency_name=None):
    """Add an existing BuildConfiguration as a dependency to another
    BuildConfiguration."""
    data = add_dependency_raw(id, name, dependency_id, dependency_name)
    if not data:
        return None
    return utils.format_json_list(data)
|
def dof(self):
    """Returns the DoF of the robot (with grippers)."""
    total = self.mujoco_robot.dof
    # A mounted gripper contributes its own joints.
    return total + self.gripper.dof if self.has_gripper else total
|
def _parse_file_modify(self, info):
    """Parse a filemodify command within a commit.

    :param info: a string in the format "mode dataref path"
        (where dataref might be the hard-coded literal 'inline').
    """
    params = info.split(b' ', 2)
    path = self._path(params[2])
    mode = self._mode(params[0])
    if params[1] == b'inline':
        # Inline content follows the command; there is no external ref.
        dataref, data = None, self._get_data(b'filemodify')
    else:
        dataref, data = params[1], None
    return commands.FileModifyCommand(path, mode, dataref, data)
|
def stop(self):
    """Stop the sensor.

    Returns False if the sensor was not running, True once all
    subscribers have been unregistered and the running flag cleared.
    """
    # Nothing to do if the sensor was never started.
    if not self._running:
        logging.warning('Kinect not running. Aborting stop')
        return False
    # Unregister the image, depth and camera-info subscribers.
    self._image_sub.unregister()
    self._depth_sub.unregister()
    # Bug fix: this previously read `.unregister` without parentheses,
    # so the camera-info subscriber was never actually unregistered.
    self._camera_info_sub.unregister()
    self._running = False
    return True
|
def update_file_metadata(self, uri, filename=None, description=None,
                         mtime=None, privacy=None):
    """Update file metadata.

    uri -- MediaFire file URI

    Supplying the following keyword arguments would change the
    metadata on the server side:

    filename -- rename file
    description -- set file description string
    mtime -- set file modification time
    privacy -- set file privacy - 'private' or 'public'
    """
    resource = self.get_resource_by_uri(uri)
    # Only File resources carry this kind of metadata.
    if not isinstance(resource, File):
        raise ValueError('Expected File, got {}'.format(type(resource)))
    return self.api.file_update(resource['quickkey'],
                                filename=filename,
                                description=description,
                                mtime=mtime,
                                privacy=privacy)
|
def _water(cls, T, P):
    """Get properties of pure water, Table 4 pag 8."""
    water = IAPWS95(P=P, T=T)
    # Gibbs energy and its T/P derivatives from the IAPWS-95 state.
    prop = {
        "g": water.h - T * water.s,
        "gt": -water.s,
        "gp": 1. / water.rho,
        "gtt": -water.cp / T,
        "gtp": water.betas * water.cp / T,
        "gpp": (-1e6 / (water.rho * water.w) ** 2
                - water.betas ** 2 * 1e3 * water.cp / T),
        # Pure water: no salinity dependence.
        "gs": 0,
        "gsp": 0,
        "thcond": water.k,
    }
    return prop
|
def plot_lf_hf(x, xlf, xhf, title=''):
    '''Plot original signal, low-pass filtered, and high-pass filtered signals.

    Args
    ----
    x : ndarray
        Signal data array
    xlf : ndarray
        Low-pass filtered signal
    xhf : ndarray
        High-pass filtered signal
    title : str
        Main title of plot
    '''
    from . import plotutils
    fig, (ax1, ax2, ax3) = plt.subplots(3, 1, sharex=True, sharey=True)
    plt.title(title)
    # One panel per signal, labelled in the upper-right corner.
    panels = ((ax1, x, _colors[0], 'original'),
              (ax2, xlf, _colors[1], 'low-pass'),
              (ax3, xhf, _colors[2], 'high-pass'))
    for ax, signal, color, label in panels:
        ax.plot(range(len(signal)), signal, color=color,
                linewidth=_linewidth, label=label)
        ax.legend(loc='upper right')
    ax2.set_ylabel('Frequency (Hz)')
    ax1, ax2, ax3 = plotutils.add_alpha_labels(
        [ax1, ax2, ax3], color='black', facecolor='none', edgecolor='none')
    # TODO break into util function
    # Convert sample-number ticks to minute:second labels.
    # NOTE(review): divides by 16 — assumes a 16 Hz sample rate; confirm.
    total_seconds = ax3.get_xticks() / 16
    hours, remainder = divmod(total_seconds, 3600)
    minutes, seconds = divmod(remainder, 60)

    def strtime(minutes, seconds):
        return '{:.0f}:{:02.0f}'.format(minutes, seconds)

    labels = list(map(strtime, minutes, seconds))
    ax3.set_xticklabels(labels)
    plt.xlabel('Sample time (minute:second)')
    plt.show()
    return None
|
def assertCategoricalLevelsEqual(self, levels1, levels2, msg=None):
    '''Fail if ``levels1`` and ``levels2`` do not have the same domain.

    Parameters
    ----------
    levels1 : iterable
    levels2 : iterable
    msg : str
        If not provided, the :mod:`marbles.mixins` or :mod:`unittest`
        standard message will be used.

    Raises
    ------
    TypeError
        If either ``levels1`` or ``levels2`` is not iterable.
    '''
    # Bug fix: collections.Iterable was removed in Python 3.10; the ABC
    # lives in collections.abc.
    if not isinstance(levels1, collections.abc.Iterable):
        raise TypeError('First argument is not iterable')
    if not isinstance(levels2, collections.abc.Iterable):
        raise TypeError('Second argument is not iterable')
    standardMsg = '%s levels != %s levels' % (levels1, levels2)
    # Membership must hold in both directions for the domains to match.
    if not all(level in levels2 for level in levels1):
        self.fail(self._formatMessage(msg, standardMsg))
    if not all(level in levels1 for level in levels2):
        self.fail(self._formatMessage(msg, standardMsg))
|
def progress(self):
    """Return the integer percentage of completed jobs in the queue.

    Fixed docstring: the previous version claimed the current and
    total counts were returned as well, but only the percentage is.
    Returns 0 for an empty queue.
    """
    total = len(self.all_jobs)
    # Jobs that are no longer active count as completed.
    done = total - len(self.active_jobs) if total > 0 else 0
    percent = int(100 * (float(done) / total)) if total > 0 else 0
    return percent
|
def cli(env, sortby, columns, datacenter, username, storage_type):
    """List file storage."""
    file_manager = SoftLayer.FileStorageManager(env.client)
    volumes = file_manager.list_file_volumes(datacenter=datacenter,
                                             username=username,
                                             storage_type=storage_type,
                                             mask=columns.mask())
    table = formatting.Table(columns.columns)
    table.sortby = sortby
    for volume in volumes:
        # Replace empty cells with the formatter's blank placeholder.
        row = [value or formatting.blank() for value in columns.row(volume)]
        table.add_row(row)
    env.fout(table)
|
def download(self, file_path=None, verbose=None, silent=None,
             ignore_existing=None, checksum=None, destdir=None,
             retries=None, ignore_errors=None, fileobj=None,
             return_responses=None, no_change_timestamp=None,
             params=None):
    """Download the file into the current working directory.

    :type file_path: str
    :param file_path: Download file to the given file_path.
    :type verbose: bool
    :param verbose: (optional) Turn on verbose output.
    :type silent: bool
    :param silent: (optional) Suppress all output.
    :type ignore_existing: bool
    :param ignore_existing: Overwrite local files if they already exist.
    :type checksum: bool
    :param checksum: (optional) Skip downloading file based on checksum.
    :type destdir: str
    :param destdir: (optional) The directory to download files to.
    :type retries: int
    :param retries: (optional) The number of times to retry on failed
        requests.
    :type ignore_errors: bool
    :param ignore_errors: (optional) Don't fail if a single file fails to
        download, continue to download other files.
    :type fileobj: file-like object
    :param fileobj: (optional) Write data to the given file-like object
        (e.g. sys.stdout).
    :type return_responses: bool
    :param return_responses: (optional) Rather than downloading files to
        disk, return a list of response objects.
    :type no_change_timestamp: bool
    :param no_change_timestamp: (optional) If True, leave the time stamp
        as the current time instead of changing it to that given in the
        original archive.
    :type params: dict
    :param params: (optional) URL parameters to send with download
        request (e.g. `cnt=0`).

    :rtype: bool
    :returns: True if file was successfully downloaded.
    """
    # Normalize all optional flags to concrete defaults.
    verbose = False if verbose is None else verbose
    ignore_existing = False if ignore_existing is None else ignore_existing
    checksum = False if checksum is None else checksum
    retries = 2 if not retries else retries
    ignore_errors = False if not ignore_errors else ignore_errors
    return_responses = False if not return_responses else return_responses
    no_change_timestamp = False if not no_change_timestamp else no_change_timestamp
    params = None if not params else params
    # silent defaults to True unless the caller explicitly passed False.
    if (fileobj and silent is None) or silent is not False:
        silent = True
    else:
        silent = False
    self.item.session.mount_http_adapter(max_retries=retries)
    file_path = self.name if not file_path else file_path
    if destdir:
        if not os.path.exists(destdir) and return_responses is not True:
            os.mkdir(destdir)
        if os.path.isfile(destdir):
            raise IOError('{} is not a directory!'.format(destdir))
        file_path = os.path.join(destdir, file_path)
    # Decide whether an existing local copy lets us skip the download.
    if not return_responses and os.path.exists(file_path.encode('utf-8')):
        if ignore_existing:
            msg = 'skipping {0}, file already exists.'.format(file_path)
            log.info(msg)
            if verbose:
                print(' ' + msg)
            elif silent is False:
                print('.', end='')
                sys.stdout.flush()
            return
        elif checksum:
            # Compare the local file's md5 against the remote metadata.
            with open(file_path, 'rb') as fp:
                md5_sum = utils.get_md5(fp)
            if md5_sum == self.md5:
                msg = ('skipping {0}, '
                       'file already exists based on checksum.'.format(file_path))
                log.info(msg)
                if verbose:
                    print(' ' + msg)
                elif silent is False:
                    print('.', end='')
                    sys.stdout.flush()
                return
        else:
            # Cheap check: identical mtime and size (or a non-empty
            # *_files.xml) counts as already downloaded.
            st = os.stat(file_path.encode('utf-8'))
            if (st.st_mtime == self.mtime) and (st.st_size == self.size) \
                    or self.name.endswith('_files.xml') and st.st_size != 0:
                msg = ('skipping {0}, file already exists '
                       'based on length and date.'.format(file_path))
                log.info(msg)
                if verbose:
                    print(' ' + msg)
                elif silent is False:
                    print('.', end='')
                    sys.stdout.flush()
                return
    parent_dir = os.path.dirname(file_path)
    if parent_dir != '' and not os.path.exists(parent_dir) and return_responses is not True:
        os.makedirs(parent_dir)
    try:
        response = self.item.session.get(self.url, stream=True, timeout=12,
                                         auth=self.auth, params=params)
        response.raise_for_status()
        if return_responses:
            return response
        # Stream the body to disk (or to the caller's file object).
        chunk_size = 2048
        if not fileobj:
            fileobj = open(file_path.encode('utf-8'), 'wb')
        with fileobj:
            for chunk in response.iter_content(chunk_size=chunk_size):
                if chunk:
                    fileobj.write(chunk)
                    fileobj.flush()
    except (RetryError, HTTPError, ConnectTimeout,
            ConnectionError, socket.error, ReadTimeout) as exc:
        msg = ('error downloading file {0}, '
               'exception raised: {1}'.format(file_path, exc))
        log.error(msg)
        # Remove the partial download before reporting the failure.
        if os.path.exists(file_path):
            os.remove(file_path)
        if verbose:
            print(' ' + msg)
        elif silent is False:
            print('e', end='')
            sys.stdout.flush()
        if ignore_errors is True:
            return False
        else:
            raise exc
    # Set mtime with mtime from files.xml.
    if not no_change_timestamp:
        # If we want to set the timestamp to that of the original archive...
        try:
            os.utime(file_path.encode('utf-8'), (0, self.mtime))
        except OSError:
            # Probably a file-like object, e.g. sys.stdout.
            pass
    msg = 'downloaded {0}/{1} to {2}'.format(self.identifier, self.name, file_path)
    log.info(msg)
    if verbose:
        print(' ' + msg)
    elif silent is False:
        print('d', end='')
        sys.stdout.flush()
    return True
|
def run_RNAplfold(input_fasta, tmpdir, W=240, L=160, U=1):
    """Run every ``<P>_RNAplfold`` profile binary over ``input_fasta``.

    Each profile executable listed in ``RNAplfold_PROFILES_EXECUTE`` is fed
    the FASTA on stdin and writes its profile to ``<tmpdir>/<P>_profile.fa``.

    Arguments:
        input_fasta: path to the input FASTA file.
        tmpdir: directory that receives the per-profile output files.
        W, Int: span - window length
        L, Int, maxiumm span
        U, Int, size of unpaired region

    Raises:
        Exception: if a profile run exits non-zero or writes an empty file.
    """
    profiles = RNAplfold_PROFILES_EXECUTE
    for i, P in enumerate(profiles):
        print("running {P}_RNAplfold... ({i}/{N})".format(P=P, i=i + 1, N=len(profiles)))
        command = "{bin}/{P}_RNAplfold".format(bin=RNAplfold_BIN_DIR, P=P)
        file_out = "{tmp}/{P}_profile.fa".format(tmp=tmpdir, P=P)
        args = " -W {W} -L {L} -u {U} < {fa} > {file_out}".format(W=W, L=L, U=U, fa=input_fasta, file_out=file_out)
        # NOTE(review): os.system goes through the shell, so paths containing
        # shell metacharacters are unsafe here; consider subprocess with
        # explicit redirection if inputs can be untrusted.
        status = os.system(command + args)
        # Bug fix: the exit status used to be ignored, so a failed run was
        # only caught indirectly (empty or missing output). Fail fast.
        if status != 0:
            raise Exception("command failed with status {0}: {1}{2}".format(status, command, args))
        # check if the file is empty
        if os.path.getsize(file_out) == 0:
            raise Exception("command wrote an empty file: {0}".format(file_out))
    print("done!")
|
def deriv2(self, p):
    """Second derivative of the Cauchy link function.

    Parameters
    ----------
    p : array-like
        Probabilities

    Returns
    -------
    g''(p) : array
        Value of the second derivative of the Cauchy link function at `p`
    """
    # Shift probabilities so p = 0.5 maps to the origin of tan's domain.
    shifted = np.pi * (p - 0.5)
    numerator = 2 * np.pi ** 2 * np.sin(shifted)
    return numerator / np.cos(shifted) ** 3
|
def get_object(self, name, *argv, **kwargs):
    """Get url object tuple for url.

    :param name: url regexp from
    :type name: str
    :param argv: overrided args
    :param kwargs: overrided kwargs
    :return: url object
    :rtype: django.conf.urls.url
    """
    regexp = name
    # Start from the stored options and let caller kwargs override them.
    opts = self.opts(regexp)
    opts.update(kwargs)
    view_args = opts.pop('view_args', argv)
    csrf_on = self.get_backend_data(regexp).get('CSRF_ENABLE', True)
    if regexp in self.settings_urls:
        # Resolve the pattern from Django settings, dropping its first char
        # and re-anchoring it at the start.
        regexp = r'^{}'.format(self.get_django_settings(regexp)[1:])
    view = self[name].as_view()
    if not csrf_on:
        view = csrf_exempt(view)
    return url(regexp, view, *view_args, **opts)
|
def _set_action_profile_event_actions(self, v, load=False):
    """Setter method for action_profile_event_actions, mapped from YANG variable
    /cfm_state/cfm_y1731/action_profile/action_profile_event_actions (container)

    If this variable is read-only (config: false) in the source YANG file,
    then _set_action_profile_event_actions is considered as a private
    method. Backends looking to populate this variable should do so via
    calling thisObj._set_action_profile_event_actions() directly.

    YANG Description: Action Profile Event type and associated actions
    """
    # Some YANG union types carry their own coercion hook; apply it first so
    # the value is in canonical form before wrapping.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Wrap the value in the generated container class; TypeError/ValueError
        # here means the supplied value is not compatible with the YANG type.
        t = YANGDynClass(v, base=action_profile_event_actions.action_profile_event_actions, is_container='container', presence=False, yang_name="action-profile-event-actions", rest_name="action-profile-event-actions", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'dot1ag-action-profile-event-actions', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-dot1ag-operational', defining_module='brocade-dot1ag-operational', yang_type='container', is_config=False)
    except (TypeError, ValueError):
        raise ValueError({'error-string': """action_profile_event_actions must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=action_profile_event_actions.action_profile_event_actions, is_container='container', presence=False, yang_name="action-profile-event-actions", rest_name="action-profile-event-actions", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'dot1ag-action-profile-event-actions', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-dot1ag-operational', defining_module='brocade-dot1ag-operational', yang_type='container', is_config=False)""", })
    self.__action_profile_event_actions = t
    # Trigger the parent object's change-notification hook, if present.
    if hasattr(self, '_set'):
        self._set()
|
def currentView(cls, parent=None):
    """Returns the current view for the given class within a viewWidget. If
    no view widget is supplied, then a blank view is returned.

    :param parent | <projexui.widgets.xviewwidget.XViewWidget> || None
    :return <XView> || None
    """
    container = parent if parent is not None else projexui.topWindow()
    # First child instance flagged as current wins; None if there is none.
    current = (view for view in container.findChildren(cls) if view.isCurrent())
    return next(current, None)
|
def call_api(self, table, column, value, **kwargs):
    """Exposed method to connect and query the EPA's API.

    Builds ``<base_url>/<table>/<column>/<value>/rows/<range>``, fetches it,
    and formats the raw XML payload.

    :param table: API table name.
    :param column: column to filter on.
    :param value: value to match (URL-quoted before use).
    :param kwargs: may contain ``output_format`` (defaults to
        ``self.output_format``); the rest is passed to ``_number_of_rows``.
    :return: data formatted by ``self._format_data``.
    """
    try:
        output_format = kwargs.pop('output_format')
    except KeyError:
        output_format = self.output_format
    url_list = [self.base_url, table, column, quote(value), 'rows']
    url_list.append(self._number_of_rows(**kwargs))
    url_string = '/'.join(url_list)
    # Bug fix: the response object used to be leaked; close it
    # deterministically once the payload has been read.
    response = urlopen(url_string)
    try:
        xml_data = response.read()
    finally:
        response.close()
    return self._format_data(output_format, xml_data)
|
def get_group_entitlement(self, group_id):
    """GetGroupEntitlement.

    [Preview API] Get a group entitlement.

    :param str group_id: ID of the group.
    :rtype: :class:`<GroupEntitlement> <azure.devops.v5_0.member_entitlement_management.models.GroupEntitlement>`
    """
    # Serialize the group id into the route only when one was supplied.
    if group_id is None:
        route_values = {}
    else:
        route_values = {'groupId': self._serialize.url('group_id', group_id, 'str')}
    response = self._send(
        http_method='GET',
        location_id='2280bffa-58a2-49da-822e-0764a1bb44f7',
        version='5.0-preview.1',
        route_values=route_values,
    )
    return self._deserialize('GroupEntitlement', response)
|
def split_pulls(all_issues, project="arokem/python-matlab-bridge"):
    """split a list of closed issues into non-PR Issues and Pull Requests"""
    issues, pulls = [], []
    for issue in all_issues:
        if not is_pull_request(issue):
            issues.append(issue)
            continue
        # Fetch the full PR object for anything that is a pull request.
        pulls.append(get_pull_request(project, issue['number'], auth=True))
    return issues, pulls
|
def send_signal(self, request, response, forum):
    """Sends the signal associated with the view.

    Emits ``self.view_signal`` carrying the handled forum, the requesting
    user, and the raw request/response pair so receivers can audit or
    post-process the view.
    """
    self.view_signal.send(sender=self, forum=forum, user=request.user, request=request, response=response, )
|
def _call(self, x, out=None):
    """Evaluate all operators in ``x`` and broadcast."""
    # Wrap ``x`` as a one-element element of the product operator's domain
    # (no casting), then delegate evaluation to the product operator.
    wrapped_x = self.prod_op.domain.element([x], cast=False)
    return self.prod_op(wrapped_x, out=out)
|
def odo_register():
    '''Enable conversion of .sdmx files with odo (http://odo.readthedocs.org).

    Adds conversion from sdmx to PD.DataFrame to odo graph.
    Note that native discovery of sdmx files is not yet supported. odo will
    thus convert to PD.DataFrame and discover the data shape from there.
    '''
    logger.info('Registering with odo...')
    # odo and its helpers are imported lazily so the package works without
    # odo installed unless this hook is explicitly invoked.
    import odo
    from odo.utils import keywords
    import pandas as PD
    from toolz import keyfilter
    import toolz.curried.operator as op

    # Thin URI wrapper that lets odo recognise .sdmx resources.
    class PandaSDMX(object):
        def __init__(self, uri):
            self.uri = uri

    @odo.resource.register(r'.*\.sdmx')
    def resource_sdmx(uri, **kwargs):
        return PandaSDMX(uri)

    @odo.discover.register(PandaSDMX)
    def _(sdmx):
        # No native sdmx discovery: materialise a DataFrame via write() and
        # let odo discover the shape from that.
        return odo.discover(Request().get(fromfile=sdmx.uri).write())

    @odo.convert.register(PD.DataFrame, PandaSDMX)
    def convert_sdmx(sdmx, **kwargs):
        write = Request().get(fromfile=sdmx.uri).write
        # Forward only those kwargs that write() actually accepts.
        return write(**keyfilter(op.contains(keywords(write)), kwargs))

    logger.info('odo registration complete.')
|
def _swap_curly ( string ) :
"""Swap single and double curly brackets"""
|
return ( string . replace ( '{{ ' , '{{' ) . replace ( '{{' , '\x00' ) . replace ( '{' , '{{' ) . replace ( '\x00' , '{' ) . replace ( ' }}' , '}}' ) . replace ( '}}' , '\x00' ) . replace ( '}' , '}}' ) . replace ( '\x00' , '}' ) )
|
def create_schema(self, check_if_exists=False, sync_schema=True, verbosity=1):
    """Creates the schema 'schema_name' for this tenant. Optionally checks if
    the schema already exists before creating it. Returns true if the
    schema was created, false otherwise."""
    # safety check: schema_name is interpolated into SQL below, so it must
    # pass validation first.
    _check_schema_name(self.schema_name)
    if check_if_exists and schema_exists(self.schema_name):
        return False
    # Open the cursor only once we know we will actually create the schema
    # (it used to be opened before the early return above).
    cursor = connection.cursor()
    # create the schema
    cursor.execute('CREATE SCHEMA %s' % self.schema_name)
    if sync_schema:
        call_command('migrate_schemas', schema_name=self.schema_name, interactive=False, verbosity=verbosity)
    connection.set_schema_to_public()
    # Bug fix: the docstring promises True on creation, but the function
    # previously fell off the end and returned None.
    return True
|
def set_window_user_pointer(window, pointer):
    """Sets the user pointer of the specified window. You may pass a normal
    python object into this function and it will be wrapped automatically.
    The object will be kept in existence until the pointer is set to
    something else or until the window is destroyed.

    Wrapper for:
        void glfwSetWindowUserPointer(GLFWwindow* window, void* pointer);
    """
    # (wrapped, original) pair; wrapped=False means the caller supplied a raw
    # c_void_p which is passed through to GLFW untouched.
    data = (False, pointer)
    if not isinstance(pointer, ctypes.c_void_p):
        data = (True, pointer)
        # Create a void pointer for the python object
        pointer = ctypes.cast(ctypes.pointer(ctypes.py_object(pointer)), ctypes.c_void_p)
    # Use the window's address as the repository key.
    window_addr = ctypes.cast(ctypes.pointer(window), ctypes.POINTER(ctypes.c_long)).contents.value
    # Keep a reference so the wrapped python object is not garbage-collected
    # while GLFW still points at it.
    _window_user_data_repository[window_addr] = data
    _glfw.glfwSetWindowUserPointer(window, pointer)
|
def send(self, tid, session, feature=None):
    '''taobao.logistics.dummy.send — dummy (virtual) shipping.

    Marks the trade as shipped without any physical logistics; the trade
    order state moves directly to "seller has shipped".

    :param tid: trade order id
    :param session: API session token
    :param feature: optional feature value forwarded to the API
    :return: the shipping result object (``self.shipping``)
    '''
    request = TOPRequest('taobao.logistics.dummy.send')
    request['tid'] = tid
    # PEP 8: identity comparison against None, not equality.
    if feature is not None:
        request['feature'] = feature
    self.create(self.execute(request, session))
    return self.shipping
|
def entries(self):
    """Return the entries (as a {date: entries} dict) of all timesheets in the
    collection."""
    # Collect each timesheet's entries, then concatenate them using the
    # ``+`` operator the entries type defines.
    # NOTE(review): ``reduce`` with no initializer raises TypeError on an
    # empty collection — presumably never empty here; confirm with callers.
    entries_list = self._timesheets_callback('entries')()
    return reduce(lambda x, y: x + y, entries_list)
|
def _choose_unit(value, unit=None, asciimode=None):
    """Finds a good unit to print seconds in.

    Args:
        value (float): measured value in seconds
        unit (str): if specified, overrides heuristic decision
        asciimode (bool): if True, forces ascii for microseconds

    Returns:
        tuple[(str, float)]: suffix, mag:
            string suffix and conversion factor

    Example:
        >>> assert _choose_unit(1.1, unit=None)[0] == 's'
        >>> assert _choose_unit(1e-2, unit=None)[0] == 'ms'
        >>> assert _choose_unit(1e-4, unit=None, asciimode=True)[0] == 'us'
        >>> assert _choose_unit(1.1, unit='ns')[0] == 'ns'
    """
    from collections import OrderedDict
    micro = _trychar('µs', 'us', asciimode)
    units = OrderedDict([('s', ('s', 1e0)), ('ms', ('ms', 1e-3)), ('us', (micro, 1e-6)), ('ns', ('ns', 1e-9)), ])
    if unit is not None:
        # Explicit unit overrides the heuristic entirely.
        return units[unit]
    # Walk from largest to smallest magnitude; stop at the first unit the
    # value exceeds, falling through to 'ns' for the tiniest values.
    for suffix, mag in units.values():  # pragma: nobranch
        if value > mag:
            break
    return suffix, mag
|
def rank(syllabifications):
    '''Rank syllabifications.'''
    def score(entry):
        # Sum of the three constraint penalties for the syllabified word:
        # weight-to-stress, peak prominence, and nucleus constraints.
        word = entry[0]
        return wsp(word) + pk_prom(word) + nuc(word)
    syllabifications.sort(key=score)
    return syllabifications
|
def list_symbols(self, all_symbols=False, snapshot=None, regex=None, **kwargs):
    """Return the symbols in this library.

    Parameters
    ----------
    all_symbols : `bool`
        If True returns all symbols under all snapshots, even if the symbol has been deleted
        in the current version (i.e. it exists under a snapshot... Default: False
    snapshot : `str`
        Return the symbols available under the snapshot.
    regex : `str`
        filter symbols by the passed in regular expression
    kwargs :
        kwarg keys are used as fields to query for symbols with metadata matching
        the kwargs query

    Returns
    -------
    String list of symbols in the library
    """
    query = {}
    if regex is not None:
        query['symbol'] = {'$regex': regex}
    if kwargs:
        for k, v in six.iteritems(kwargs):
            # TODO: this doesn't work as expected as it ignores the versions with metadata.deleted set
            # as a result it will return symbols with matching metadata which have been deleted
            # Maybe better add a match step in the pipeline instead of making it part of the query
            query['metadata.' + k] = v
    if snapshot is not None:
        try:
            query['parent'] = self._snapshots.find_one({'name': snapshot})['_id']
        except TypeError:
            # find_one returned None (unknown snapshot), so subscripting
            # raised TypeError.
            raise NoDataFoundException('No snapshot %s in library %s' % (snapshot, self._arctic_lib.get_name()))
    elif all_symbols:
        return self._versions.find(query).distinct('symbol')
    # Return just the symbols which aren't deleted in the 'trunk' of this library
    pipeline = []
    if query:
        # Match based on user criteria first
        pipeline.append({'$match': query})
    pipeline.extend([
        # version_custom value is: 2 * version + (1 if deleted else 0)
        # This is used to optimize aggregation query:
        # - avoid sorting
        # - be able to rely on the latest version (max) for the deleted status
        # Be aware of that if you don't use custom sort or if use a sort before $group which utilizes
        # exactly an existing index, the $group will do best effort to utilize this index:
        # - https://jira.mongodb.org/browse/SERVER-4507
        {'$group': {'_id': '$symbol', 'version_custom': {'$max': {'$add': [{'$multiply': ['$version', 2]}, {'$cond': [{'$eq': ['$metadata.deleted', True]}, 1, 0]}]}}, }},
        # Don't include symbols which are part of some snapshot, but really deleted...
        # (an even version_custom means the newest version is not deleted)
        {'$match': {'version_custom': {'$mod': [2, 0]}}}])
    # We may hit the group memory limit (100MB), so use allowDiskUse to circumvent this
    # - https://docs.mongodb.com/manual/reference/operator/aggregation/group/#group-memory-limit
    return sorted([x['_id'] for x in self._versions.aggregate(pipeline, allowDiskUse=True)])
|
def account(self, id):
    """Fetch account information by user `id`.

    Does not require authentication.

    Returns a `user dict`_.
    """
    # Accept either a raw id or an id-bearing dict; normalise first.
    acct_id = self.__unpack_id(id)
    endpoint = '/api/v1/accounts/{0}'.format(str(acct_id))
    return self.__api_request('GET', endpoint)
|
def findwalks(CIJ):
    '''Walks are sequences of linked nodes, that may visit a single node more
    than once. This function finds the number of walks of a given length,
    between any two nodes.

    Parameters
    ----------
    CIJ : NxN np.ndarray
        binary directed/undirected connection matrix

    Returns
    -------
    Wq : NxNxQ np.ndarray
        Wq[i,j,q] is the number of walks from i to j of length q
    twalk : int
        total number of walks found
    wlq : Qx1 np.ndarray
        walk length distribution as a function of q

    Notes
    -----
    Wq grows very quickly for larger N, K, q. Weights are discarded.
    '''
    CIJ = binarize(CIJ, copy=True)
    n = len(CIJ)
    Wq = np.zeros((n, n, n))
    CIJpwr = CIJ.copy()
    # Walks of length 1 are exactly the (binarized) adjacency matrix.
    Wq[:, :, 1] = CIJ
    # Bug fix: the loop previously ran ``for q in range(n)``, which stored
    # CIJ**2 into plane 0 and CIJ**3 into plane 1 (clobbering the length-1
    # walks set above), shifting every walk-length index. Matching the
    # MATLAB original, walks of length q are CIJ**q for q >= 2; plane 0
    # (length-0 walks) stays zero.
    for q in range(2, n):
        CIJpwr = np.dot(CIJpwr, CIJ)
        Wq[:, :, q] = CIJpwr
    twalk = np.sum(Wq)  # total number of walks
    wlq = np.sum(np.sum(Wq, axis=0), axis=0)
    return Wq, twalk, wlq
|
def mapping_of(cls):
    """Expects a mapping from some key to data for `cls` instances."""
    def mapper(data):
        # Reject non-mapping input up front with a descriptive error.
        if not isinstance(data, Mapping):
            raise TypeError("data must be a mapping, not %s" % type(data).__name__)
        converted = {}
        for key, value in data.items():
            converted[key] = cls(value)
        return converted
    return mapper
|
def generate_file(self, r, undistorted=False):
    """Generate file for IIIFRequest object r from this image.

    FIXME - Would be nicer to have the test for an undistorted image request
    based on the IIIFRequest object, and then know whether to apply
    canonicalization or not.

    Logically we might use `w,h` instead of the Image API v2.0 canonical
    form `w,` if the api_version is 1.x. However, OSD 1.2.1 and 2.x assume
    the new canonical form even in the case where the API version is declared
    earlier. Thus, determine whether to use the canonical or `w,h` form based
    solely on the setting of osd_version.
    """
    use_canonical = self.get_osd_config(self.osd_version)['use_canonical']
    height = None
    if (undistorted and use_canonical):
        # Remember the requested height, then canonicalize the size
        # parameter to the `w,` form:
        height = r.size_wh[1]
        r.size_wh = [r.size_wh[0], None]
        # [sw,sh] -> [sw,]
    path = r.url()
    # Generate...
    if (self.dryrun):
        self.logger.info("%s / %s" % (self.dst, path))
    else:
        m = self.manipulator_klass(api_version=self.api_version)
        try:
            m.derive(srcfile=self.src, request=r, outfile=os.path.join(self.dst, path))
            self.logger.info("%s / %s" % (self.dst, path))
        except IIIFZeroSizeError:
            self.logger.info("%s / %s - zero size, skipped" % (self.dst, path))
            return
            # done if zero size
    if (r.region_full and use_canonical and height is not None):
        # In v2.0 of the spec, the canonical URI form `w,` for scaled
        # images of the full region was introduced. This is somewhat at
        # odds with the requirement for `w,h` specified in `sizes` to
        # be available, and has problems of precision with tall narrow
        # images. Hopefully will be fixed in 3.0 but for now symlink
        # the `w,h` form to the `w,` dirs so that might use the specified
        # `w,h` also work. See
        # <https://github.com/IIIF/iiif.io/issues/544>
        # FIXME - This is ugly because we duplicate code in
        # iiif.request.url to construct the partial URL
        region_dir = os.path.join(r.quote(r.identifier), "full")
        wh_dir = "%d,%d" % (r.size_wh[0], height)
        wh_path = os.path.join(region_dir, wh_dir)
        wc_dir = "%d," % (r.size_wh[0])
        wc_path = os.path.join(region_dir, wc_dir)
        if (not self.dryrun):
            # Replace any stale link before creating the new one.
            ln = os.path.join(self.dst, wh_path)
            if (os.path.exists(ln)):
                os.remove(ln)
            os.symlink(wc_dir, ln)
        self.logger.info("%s / %s -> %s" % (self.dst, wh_path, wc_path))
|
def cmd_migrate(self, name=None, fake=False):
    """Run migrations.

    :param name: optional migration name to run; all pending when None.
    :param fake: mark migrations as applied without executing them.
    """
    from peewee_migrate.router import Router, LOGGER
    LOGGER.setLevel('INFO')
    LOGGER.propagate = 0
    router = Router(self.database, migrate_dir=self.app.config['PEEWEE_MIGRATE_DIR'], migrate_table=self.app.config['PEEWEE_MIGRATE_TABLE'])
    migrations = router.run(name, fake=fake)
    if migrations:
        # ``Logger.warn`` is a deprecated alias for ``warning``; also use
        # lazy %-args instead of eager string formatting.
        LOGGER.warning('Migrations are completed: %s', ', '.join(migrations))
|
def process_view(self, request, view_func, view_args, view_kwargs):
    """Per-request mechanics for the current page object."""
    # Load the closest matching page by slug, and assign it to the
    # request object. If none found, skip all further processing.
    slug = path_to_slug(request.path_info)
    pages = Page.objects.with_ascendants_for_slug(slug, for_user=request.user, include_login_required=True)
    if pages:
        page = pages[0]
        setattr(request, "page", page)
        context_processors.page(request)
    else:
        return
    # Handle ``page.login_required``.
    if page.login_required and not request.user.is_authenticated():
        return redirect_to_login(request.get_full_path())
    # If the view isn't yacms's page view, try to return the result
    # immediately. In the case of a 404 with an URL slug that matches a
    # page exactly, swallow the exception and try yacms's page view.
    # This allows us to set up pages with URLs that also match non-page
    # urlpatterns. For example, a page could be created with the URL
    # /blog/about/, which would match the blog urlpattern, and assuming
    # there wasn't a blog post with the slug "about", would raise a 404
    # and subsequently be rendered by yacms's page view.
    if view_func != page_view:
        try:
            return view_func(request, *view_args, **view_kwargs)
        except Http404:
            if page.slug != slug:
                raise
    # Run page processors: those registered for this page's content model
    # plus those registered for this exact slug.
    extra_context = {}
    model_processors = page_processors.processors[page.content_model]
    slug_processors = page_processors.processors["slug:%s" % page.slug]
    for (processor, exact_page) in slug_processors + model_processors:
        # ``exact_page`` processors only run on the page they were
        # registered for, not for descendant URLs.
        if exact_page and not page.is_current:
            continue
        processor_response = processor(request, page)
        if isinstance(processor_response, HttpResponse):
            return processor_response
        elif processor_response:
            try:
                # Merge dict-like responses into the template context;
                # the first processor to set a key wins.
                for k, v in processor_response.items():
                    if k not in extra_context:
                        extra_context[k] = v
            except (TypeError, ValueError):
                name = "%s.%s" % (processor.__module__, processor.__name__)
                error = ("The page processor %s returned %s but must " "return HttpResponse or dict." % (name, type(processor_response)))
                raise ValueError(error)
    return page_view(request, slug, extra_context=extra_context)
|
def add_argparser(self, root, parents):
    """Add arguments for this command.

    :param root: argparse sub-parsers object to attach the ``diff`` parser to.
    :param parents: parent parsers whose options are inherited.
    :return: the newly created ``diff`` sub-parser.
    """
    parser = root.add_parser('diff', parents=parents)
    # Dispatch back to this command object when 'diff' is selected.
    parser.set_defaults(func=self)
    # (flags, options) specs, declared once and registered in order.
    argument_specs = [
        (('--secrets',), dict(dest='secrets', action='store', help='Path to the authorization secrets file (client_secrets.json).')),
        (('-d', '--data'), dict(dest='data_path', action='store', default=None, help='Path to a existing JSON diff file.')),
        (('report_a_path',), dict(action='store', help='Path to a JSON file containing the initial report data.')),
        (('report_b_path',), dict(action='store', help='Path to a JSON file containing the report data to compare.')),
        (('output_path',), dict(action='store', help='Path to output either an HTML report or a JSON data file.')),
    ]
    for flags, options in argument_specs:
        parser.add_argument(*flags, **options)
    return parser
|
def frame_apply(obj, func, axis=0, broadcast=None, raw=False, reduce=None, result_type=None, ignore_failures=False, args=None, kwds=None):
    """construct and return a row or column based frame apply object"""
    # Normalize axis aliases (e.g. 'index'/'columns') to 0 or 1, then
    # dispatch to the matching apply class.
    axis = obj._get_axis_number(axis)
    klass = FrameRowApply if axis == 0 else FrameColumnApply
    return klass(obj, func, broadcast=broadcast, raw=raw, reduce=reduce,
                 result_type=result_type, ignore_failures=ignore_failures,
                 args=args, kwds=kwds)
|
def parse_peddy_csv(self, f, pattern):
    """Parse csv output from peddy.

    ``f`` is a file dict with the raw text under ``'f'`` and the filename
    under ``'fn'``. Returns ``{sample_name: {column + '_' + pattern: value}}``
    or ``None`` when no sample column or no data rows were found.
    """
    parsed_data = dict()
    headers = None
    s_name_idx = None
    for line in f['f'].splitlines():
        fields = line.split(",")
        if headers is None:
            headers = fields
            # Locate the sample-name column(s): either a single
            # ``sample_id`` or a ``sample_a``/``sample_b`` pair for
            # pairwise checks.
            try:
                s_name_idx = [headers.index("sample_id")]
            except ValueError:
                try:
                    s_name_idx = [headers.index("sample_a"), headers.index("sample_b")]
                except ValueError:
                    log.warn("Could not find sample name in Peddy output: {}".format(f['fn']))
                    return None
            continue
        s_name = '-'.join(fields[idx] for idx in s_name_idx)
        parsed_data[s_name] = dict()
        for i, v in enumerate(fields):
            if i in s_name_idx:
                continue
            if headers[i] == "error" and pattern == "sex_check":
                # The sex_check 'error' flag is inverted relative to the
                # other checks; flip it for consistent reporting.
                v = "True" if v == "False" else "False"
            # add the pattern as a suffix to key
            key = headers[i] + "_" + pattern
            try:
                parsed_data[s_name][key] = float(v)
            except ValueError:
                parsed_data[s_name][key] = v
    return parsed_data if parsed_data else None
|
def _parse_file(cls, value):
    """Represents value as a string, allowing including text
    from nearest files using `file:` directive.

    Directive is sandboxed and won't reach anything outside
    directory with setup.py.

    Examples:
        file: README.rst, CHANGELOG.md, src/file.txt

    :param str value:
    :rtype: str
    """
    include_directive = 'file:'
    # Non-strings and plain strings pass through unchanged.
    if not isinstance(value, string_types) or not value.startswith(include_directive):
        return value
    spec = value[len(include_directive):]
    contents = []
    for raw_path in spec.split(','):
        path = os.path.abspath(raw_path.strip())
        # _assert_local raises for paths escaping the project directory;
        # its return value is irrelevant (hence the original's ``or True``).
        cls._assert_local(path)
        if os.path.isfile(path):
            contents.append(cls._read_file(path))
    return '\n'.join(contents)
|
def add_information_about_person(self, session_info):
    """If there already are information from this source in the cache
    this function will overwrite that information"""
    # Work on a copy so the caller's dict keeps its 'issuer' key.
    info = dict(session_info)
    issuer = info.pop("issuer")
    name_id = info["name_id"]
    self.cache.set(name_id, issuer, info, info["not_on_or_after"])
    return name_id
|
def axis_to_data(ax, width):
    """For a width in axis coordinates, return the corresponding width in data
    coordinates.

    Parameters
    ----------
    ax : matplotlib.axis
        Axis object from matplotlib.
    width : float
        Width in xaxis coordinates.
    """
    x_lo, x_hi = ax.get_xlim()
    y_lo, y_hi = ax.get_ylim()
    # Average the x- and y-extents so the returned width is isotropic in
    # data space.
    return 0.5 * (width * (x_hi - x_lo) + width * (y_hi - y_lo))
|
def main():  # pragma: no cover
    """Main entry point.

    Installs a SIGPIPE handler so the process exits quietly on a broken
    pipe, then delegates to ``_main`` with the real argv and streams.
    Returns 2 on keyboard interrupt.
    """
    try:  # Exit on broken pipe.
        signal.signal(signal.SIGPIPE, signal.SIG_DFL)
    except AttributeError:  # SIGPIPE is not available on Windows.
        pass
    try:
        return _main(sys.argv, standard_out=sys.stdout, standard_error=sys.stderr)
    except KeyboardInterrupt:
        return 2
|
def _nicetitle(coord, value, maxchar, template):
    """Put coord, value in template and truncate at maxchar"""
    pretty = format_item(value, quote_strings=False)
    title = template.format(coord=coord, value=pretty)
    if len(title) <= maxchar:
        return title
    # Truncate, leaving room for the trailing ellipsis.
    return title[:maxchar - 3] + '...'
|
def update_route53(self):
    """Update list of Route53 DNS Zones and their records for the account.

    Syncs the local DNSZone/DNSRecord tables against what AWS reports:
    adds new zones/records, updates changed ones, and removes those that
    no longer exist upstream.

    Returns:
        `None`
    """
    self.log.debug('Updating Route53 information for {}'.format(self.account))
    # region Update zones
    existing_zones = DNSZone.get_all(self.account)
    zones = self.__fetch_route53_zones()
    for resource_id, data in zones.items():
        if resource_id in existing_zones:
            zone = DNSZone.get(resource_id)
            if zone.update(data):
                self.log.debug('Change detected for Route53 zone {}/{}'.format(self.account, zone.name))
                zone.save()
        else:
            tags = data.pop('tags')
            DNSZone.create(resource_id, account_id=self.account.account_id, properties=data, tags=tags)
            self.log.debug('Added Route53 zone {}/{}'.format(self.account, data['name']))
    db.session.commit()
    # Delete zones that no longer exist upstream.
    zk = set(zones.keys())
    ezk = set(existing_zones.keys())
    for resource_id in ezk - zk:
        zone = existing_zones[resource_id]
        db.session.delete(zone.resource)
        self.log.debug('Deleted Route53 zone {}/{}'.format(self.account.account_name, zone.name.value))
    db.session.commit()
    # endregion
    # region Update resource records
    # Bug fix: this region used to be wrapped in a no-op bare
    # ``try: ... except: raise`` which re-raised every exception
    # unchanged; the pointless wrapper has been removed.
    for zone_id, zone in DNSZone.get_all(self.account).items():
        existing_records = {rec.id: rec for rec in zone.records}
        records = self.__fetch_route53_zone_records(zone.get_property('zone_id').value)
        for data in records:
            if data['id'] in existing_records:
                record = existing_records[data['id']]
                if record.update(data):
                    self.log.debug('Changed detected for DNSRecord {}/{}/{}'.format(self.account, zone.name, data['name']))
                    record.save()
            else:
                record = DNSRecord.create(data['id'], account_id=self.account.account_id, properties={k: v for k, v in data.items() if k != 'id'}, tags={})
                self.log.debug('Added new DNSRecord {}/{}/{}'.format(self.account, zone.name, data['name']))
                zone.add_record(record)
        db.session.commit()
        # Delete records removed upstream.
        rk = set(x['id'] for x in records)
        erk = set(existing_records.keys())
        for resource_id in erk - rk:
            record = existing_records[resource_id]
            zone.delete_record(record)
            self.log.debug('Deleted Route53 record {}/{}/{}'.format(self.account.account_name, zone_id, record.name))
        db.session.commit()
    # endregion
|
def stop(self) -> None:
    """Stop monitoring the base unit."""
    # Flag shutdown first so callbacks triggered by the close below do not
    # attempt to resume work.
    self._shutdown = True
    # Close connection if needed
    self._protocol.close()
    # Cancel any pending tasks
    self.cancel_pending_tasks()
|
def make_plot_waveform_plot(workflow, params, out_dir, ifos, exclude=None, require=None, tags=None):
    """Add plot_waveform jobs to the workflow.

    One job is created per ``single_template_plot`` configuration
    subsection that survives the require/exclude filters; each node is fed
    the template parameters from ``params``.

    :param workflow: the workflow the nodes are appended to.
    :param params: dict of template parameters (masses, spins, and
        optionally precessing values when 'u_vals' is present).
    :param out_dir: output directory for the plots.
    :param ifos: interferometers the executable runs over.
    :param exclude: subsection names to exclude.
    :param require: subsection names to require.
    :param tags: extra tags appended to each node's tag list.
    :return: FileList of the generated output files.
    """
    tags = [] if tags is None else tags
    makedir(out_dir)
    name = 'single_template_plot'
    # Select config subsections, honouring the require/exclude filters.
    secs = requirestr(workflow.cp.get_subsections(name), require)
    secs = excludestr(secs, exclude)
    files = FileList([])
    for tag in secs:
        node = PlotExecutable(workflow.cp, 'plot_waveform', ifos=ifos, out_dir=out_dir, tags=[tag] + tags).create_node()
        node.add_opt('--mass1', "%.6f" % params['mass1'])
        node.add_opt('--mass2', "%.6f" % params['mass2'])
        node.add_opt('--spin1z', "%.6f" % params['spin1z'])
        node.add_opt('--spin2z', "%.6f" % params['spin2z'])
        if 'u_vals' in params:
            # Precessing options
            node.add_opt('--spin1x', "%.6f" % params['spin1x'])
            node.add_opt('--spin2x', "%.6f" % params['spin2x'])
            node.add_opt('--spin1y', "%.6f" % params['spin1y'])
            node.add_opt('--spin2y', "%.6f" % params['spin2y'])
            node.add_opt('--inclination', "%.6f" % params['inclination'])
            node.add_opt('--u-val', "%.6f" % params['u_vals'])
        node.new_output_file_opt(workflow.analysis_time, '.png', '--output-file')
        workflow += node
        files += node.output_files
    return files
|
def save_array(store, arr, **kwargs):
    """Convenience function to save a NumPy array to the local file system,
    following a similar API to the NumPy save() function.

    Parameters
    ----------
    store : MutableMapping or string
        Store or path to directory in file system or name of zip file.
    arr : ndarray
        NumPy array with data to save.
    kwargs
        Passed through to :func:`create`, e.g., compressor.

    Examples
    --------
    Save an array to a directory on the file system (uses a
    :class:`DirectoryStore`)::

        >>> import zarr
        >>> import numpy as np
        >>> arr = np.arange(10000)
        >>> zarr.save_array('data/example.zarr', arr)
        >>> zarr.load('data/example.zarr')
        array([   0,    1,    2, ..., 9997, 9998, 9999])

    Save an array to a single file (uses a :class:`ZipStore`)::

        >>> zarr.save_array('data/example.zip', arr)
        >>> zarr.load('data/example.zip')
        array([   0,    1,    2, ..., 9997, 9998, 9999])
    """
    # Only stores we created from a path string are ours to close.
    owns_store = isinstance(store, str)
    store = normalize_store_arg(store, clobber=True)
    try:
        _create_array(arr, store=store, overwrite=True, **kwargs)
    finally:
        # Closing is needed to ensure zip file records are written.
        if owns_store and hasattr(store, 'close'):
            store.close()
|
def _loop_thread_main(self):
    """Main background thread running the event loop.

    Binds ``self.loop`` to this thread, runs it until stopped, and always
    closes it on the way out. Exceptions are logged rather than propagated
    because nothing joins this worker thread to observe them.
    """
    asyncio.set_event_loop(self.loop)
    # Let in-loop code verify it is executing on the loop thread.
    self._loop_check.inside_loop = True
    try:
        self._logger.debug("Starting loop in background thread")
        self.loop.run_forever()
        self._logger.debug("Finished loop in background thread")
    except:  # pylint:disable=bare-except; This is a background worker thread.
        self._logger.exception("Exception raised from event loop thread")
    finally:
        self.loop.close()
|
def add(self, item, header_flag=False, align=None):
    """Append one or more Cells to this row.

    :param item: An element to add to the Cells; can be a string-like value,
        a list of such values, an Image, or a Cell object.
    :type item: basestring, QString, list, Cell
    :param header_flag: Flag indicating if the item is a header or not.
    :type header_flag: bool
    :param align: Optional alignment qualifier for all cells in the row.
    :type align: basestring
    """
    # String-like values are wrapped in a new Cell carrying the row options.
    if self._is_stringable(item) or self._is_qstring(item):
        self.cells.append(Cell(item, header=header_flag, align=align))
        return
    # Pre-built cells are taken as-is.
    if isinstance(item, Cell):
        self.cells.append(item)
        return
    # Images get a bare Cell wrapper (no header/align options).
    if isinstance(item, Image):
        self.cells.append(Cell(item))
        return
    # Lists are expanded element-wise, each element wrapped like a string.
    if isinstance(item, list):
        self.cells.extend(Cell(entry, header=header_flag, align=align) for entry in item)
        return
    raise InvalidMessageItemError(item, item.__class__)
|
def factor_for_trace(ls: HilbertSpace, op: Operator) -> Operator:
    r'''Given a :class:`.LocalSpace` `ls` to take the partial trace over and an
    operator `op`, factor the trace such that operators acting on disjoint
    degrees of freedom are pulled out of the trace. If the operator acts
    trivially on ls the trace yields only a pre-factor equal to the dimension
    of ls. If there are :class:`LocalSigma` operators among a product, the
    trace's cyclical property is used to move to sandwich the full product by
    :class:`LocalSigma` operators:

    .. math::

        {\rm Tr} A \sigma_{jk} B = {\rm Tr} \sigma_{jk} B A \sigma_{jj}

    Args:
        ls: Degree of Freedom to trace over
        op: Operator to take the trace of

    Returns:
        The (partial) trace over the operator's spc-degrees of freedom'''
    if op.space == ls:
        # op lives exactly on the traced space: only factors acting on the
        # trivial space may leave the trace.
        if isinstance(op, OperatorTimes):
            pull_out = [o for o in op.operands if o.space is TrivialSpace]
            rest = [o for o in op.operands if o.space is not TrivialSpace]
            if pull_out:
                return (OperatorTimes.create(*pull_out) * OperatorTrace.create(OperatorTimes.create(*rest), over_space=ls))
        # Nothing factorable: signal the simplification engine to give up.
        raise CannotSimplify()
    if ls & op.space == TrivialSpace:
        # op acts trivially on ls: tracing just multiplies by dim(ls).
        return ls.dimension * op
    if ls < op.space and isinstance(op, OperatorTimes):
        # Separate factors disjoint from ls (may leave the trace) from those
        # overlapping ls (must stay inside).
        pull_out = [o for o in op.operands if (o.space & ls) == TrivialSpace]
        rest = [o for o in op.operands if (o.space & ls) != TrivialSpace]
        if (not isinstance(rest[0], LocalSigma) or not isinstance(rest[-1], LocalSigma)):
            # Rotate the product cyclically so it is sandwiched by LocalSigma
            # operators, per the identity in the docstring.
            for j, r in enumerate(rest):
                if isinstance(r, LocalSigma):
                    m = r.j
                    rest = (rest[j:] + rest[:j] + [LocalSigma.create(m, m, hs=ls), ])
                    break
        if not rest:
            rest = [IdentityOperator]
        if len(pull_out):
            return (OperatorTimes.create(*pull_out) * OperatorTrace.create(OperatorTimes.create(*rest), over_space=ls))
    raise CannotSimplify()
|
def __hover(self, **kwargs):
    """Hover the particle 1 m above the bathymetry WHERE IT WOULD HAVE ENDED UP.

    This is WRONG (per the original author) and we need to compute the
    location where the particle actually hit the bathymetry and hover 1 m
    above THAT, not above the end point that passed "through" it.
    """
    end_point = kwargs.pop('end_point')
    # The location argument should really be the bathymetry intersection
    # point, not the through-the-floor end point.
    seafloor_depth = self.get_depth(location=end_point)
    return Location4D(
        latitude=end_point.latitude,
        longitude=end_point.longitude,
        depth=seafloor_depth + 1.,
    )
|
def add_workflow(self, workflow):
    """Add a sub-workflow to this workflow.

    Registers a Workflow instance as a child of this one; parent/child
    relationships are determined by data dependencies.

    Parameters
    ----------
    workflow : Workflow instance
        The sub-workflow to add to this one.
    """
    workflow.in_workflow = self
    self.sub_workflows.append(workflow)
    # Represent the sub-workflow as a single job node in this DAG.
    job = workflow.as_job
    self._adag.addJob(job)
    # Register the sub-workflow's DAX file with a local physical file name.
    job.file.PFN(os.path.join(os.getcwd(), job.file.name), site='local')
    self._adag.addFile(job.file)
    # Any externally supplied inputs become root dependencies of the child.
    for external_input in self._external_workflow_inputs:
        workflow._make_root_dependency(external_input.node)
    return self
|
def layer_description_extractor(layer, node_to_id):
    '''Serialize a stub layer into a (type-name, input-ids, output-id, *params) description.'''
    # Map the layer's graph nodes to their integer ids.
    inputs = layer.input
    if inputs is not None:
        if isinstance(inputs, Iterable):
            inputs = [node_to_id[node] for node in inputs]
        else:
            inputs = node_to_id[inputs]
    outputs = layer.output
    if outputs is not None:
        outputs = node_to_id[outputs]
    name = type(layer).__name__
    if isinstance(layer, StubConv):
        return (name, inputs, outputs, layer.input_channel, layer.filters, layer.kernel_size, layer.stride, layer.padding)
    if isinstance(layer, StubDense):
        # NOTE: deliberately a list (not a tuple) to match the original serialization.
        return [name, inputs, outputs, layer.input_units, layer.units]
    if isinstance(layer, StubBatchNormalization):
        return (name, inputs, outputs, layer.num_features)
    if isinstance(layer, StubDropout):
        return (name, inputs, outputs, layer.rate)
    if isinstance(layer, StubPooling):
        return (name, inputs, outputs, layer.kernel_size, layer.stride, layer.padding)
    # Parameter-free layers carry only their connectivity.
    return (name, inputs, outputs)
|
def MLP(num_hidden_layers=2, hidden_size=512, activation_fn=layers.Relu, num_output_classes=10, mode="train"):
    """Multi-layer feed-forward network: Flatten -> (Dense -> activation) * N -> Dense -> LogSoftmax."""
    del mode  # unused; kept for interface compatibility
    stack = [layers.Flatten()]
    for _ in range(num_hidden_layers):
        stack.append(layers.Dense(hidden_size))
        stack.append(activation_fn())
    stack.append(layers.Dense(num_output_classes))
    stack.append(layers.LogSoftmax())
    return layers.Serial(*stack)
|
def updateFgiAnnotationFromFi(fgiContainer, fiContainer, largerBetter):
    """Copy the best feature-item annotation onto each feature-group item.

    For every feature group item (fgi), collect [score, peptide, sequence]
    from all annotated feature items it references and apply the best-scoring
    one; groups with no annotated features are marked unannotated.

    :param fgiContainer: container whose ``container`` maps to fgi objects
    :param fiContainer: container providing ``getItem(specfile, fiId)``
    :param largerBetter: True if a larger score is better
    """
    for fgi in listvalues(fgiContainer.container):
        candidates = list()
        for specfile, fiId in zip(fgi.specfiles, fgi.featureIds):
            fi = fiContainer.getItem(specfile, fiId)
            if fi.isAnnotated:
                candidates.append([fi.score, fi.peptide, fi.sequence])
        # Best candidate first: descending when larger scores are better.
        candidates.sort(reverse=largerBetter)
        if candidates:
            fgi.isAnnotated = True
            fgi.score, fgi.peptide, fgi.sequence = candidates[0]
        else:
            fgi.isAnnotated = False
|
def _get(url: str, headers: dict) -> dict:
    """Make a GET call and return the decoded JSON payload.

    Raises GoogleApiError (carrying the status code and any "error" field
    from the body) on a non-200 response.
    """
    response = requests.get(url, headers=headers)
    payload = response.json()
    if response.status_code != 200:
        raise GoogleApiError({
            "status_code": response.status_code,
            "error": payload.get("error", ""),
        })
    return payload
|
def get_activities_for_objective(self, objective_id=None):
    """Gets the activities for the given objective.

    In plenary mode, the returned list contains all of the activities mapped
    to the objective Id or an error results if an Id in the supplied list is
    not found or inaccessible. Otherwise, inaccessible Activities may be
    omitted from the list and may present the elements in any order,
    including returning a unique set.

    arg:    objective_id (osid.id.Id): Id of the Objective
    return: (osid.learning.ActivityList) - list of enrollments
    raise:  NotFound - objective_id not found
    raise:  NullArgument - objective_id is null
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    compliance: mandatory - This method must be implemented.
    """
    if objective_id is None:
        raise NullArgument()
    # TODO: should this also verify that objective_id actually exists?
    path = construct_url('activities', bank_id=self._catalog_idstr, obj_id=objective_id)
    return objects.ActivityList(self._get_request(path))
|
def format_field(self, value, format_spec):
    """Override :meth:`string.Formatter.format_field` so that
    :class:`datetime.datetime` values get a filename-safe default format,
    and ``None`` renders as an empty string rather than ``None``.
    """
    if value is None:
        return ''
    if isinstance(value, datetime) and not format_spec:
        # Default to a sortable, filesystem-safe timestamp format.
        format_spec = '%Y-%m-%d_%H-%M-%S'
    return super().format_field(value, format_spec)
|
def post(self):
    """POST item to Model
    post:
      requestBody:
        description: Model schema
        required: true
        content:
          application/json:
            schema:
              $ref: '#/components/schemas/{{self.__class__.__name__}}.post'
      responses:
        201:
          description: Item inserted
          content:
            application/json:
              schema:
                type: object
                properties:
                  id:
                    type: string
                  result:
                    $ref: '#/components/schemas/{{self.__class__.__name__}}.post'
        400:
          $ref: '#/components/responses/400'
        401:
          $ref: '#/components/responses/401'
        422:
          $ref: '#/components/responses/422'
        500:
          $ref: '#/components/responses/500'
    """
    # Reject anything that is not a JSON request body up front.
    if not request.is_json:
        return self.response_400(message="Request is not JSON")
    try:
        # Deserialize/validate the payload against the model's add schema.
        item = self.add_model_schema.load(request.json)
    except ValidationError as err:
        return self.response_422(message=err.messages)
    # This validates custom Schema with custom validations
    # NOTE(review): assumes the pre-3.0 marshmallow API where load() returns a
    # result object with .data/.errors (errors arrive as a dict) -- confirm the
    # pinned marshmallow version before upgrading.
    if isinstance(item.data, dict):
        return self.response_422(message=item.errors)
    # Hook for subclasses to mutate/inspect the item before insertion.
    self.pre_add(item.data)
    try:
        self.datamodel.add(item.data, raise_exception=True)
        self.post_add(item.data)
        return self.response(
            201,
            **{
                API_RESULT_RES_KEY: self.add_model_schema.dump(item.data, many=False).data,
                "id": self.datamodel.get_pk_value(item.data),
            }
        )
    except IntegrityError as e:
        # Database constraint violations surface as 422 with the DB message.
        return self.response_422(message=str(e.orig))
|
def create_keyspace(name, strategy_class, replication_factor, durable_writes=True, **replication_values):
    """creates a keyspace

    :param name: name of keyspace to create
    :param strategy_class: keyspace replication strategy class
    :param replication_factor: keyspace replication factor
    :param durable_writes: 1.2 only, write log is bypassed if set to False
    :param **replication_values: 1.2 only, additional values to add to the replication data map
    """
    cluster = get_cluster()
    # No-op if the keyspace already exists.
    if name not in cluster.metadata.keyspaces:  # try the 1.2 method
        replication_map = {'class': strategy_class, 'replication_factor': replication_factor}
        replication_map.update(replication_values)
        if strategy_class.lower() != 'simplestrategy':
            # Although the Cassandra documentation states for `replication_factor`
            # that it is "Required if class is SimpleStrategy; otherwise,
            # not used." we get an error if it is present.
            replication_map.pop('replication_factor', None)
        # CQL map literals use single quotes, hence the JSON double-quote swap.
        query = """
        CREATE KEYSPACE {}
        WITH REPLICATION = {}
        """.format(name, json.dumps(replication_map).replace('"', "'"))
        # NOTE(review): this comparison is case-sensitive while the one above is
        # case-insensitive; confirm whether DURABLE_WRITES should also be skipped
        # for e.g. 'simplestrategy' in lower case.
        if strategy_class != 'SimpleStrategy':
            query += " AND DURABLE_WRITES = {}".format('true' if durable_writes else 'false')
        execute(query)
|
def reload_input_rbridge_id(self, **kwargs):
    """Auto Generated Code.

    Build the NETCONF payload for a reload of the given rbridge-id and hand
    it to the callback (``kwargs['callback']`` or ``self._callback``).
    """
    config = ET.Element("config")
    reload_elem = ET.Element("reload")
    # NOTE: the initial "config" element is discarded -- a quirk of the
    # generated code, preserved here: the payload root is the reload element.
    config = reload_elem
    input_elem = ET.SubElement(reload_elem, "input")
    rbridge_node = ET.SubElement(input_elem, "rbridge-id")
    rbridge_node.text = kwargs.pop('rbridge_id')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
|
def _get(self, url, headers=None):
    """Get a JSON API endpoint and return the parsed data.

    :param url: str, *relative* URL (relative to pp-admin/api endpoint)
    :param headers: dict (optional)
    :returns: deferred that when fired returns the parsed data from JSON,
              or errbacks with ProductPagesException
    """
    # print('getting %s' % url)
    # BUG FIX: the default used to be a (shared) mutable dict; copy whatever
    # the caller passed so we never mutate their mapping.
    headers = dict(headers) if headers else {}
    headers['Accept'] = 'application/json'
    url = posixpath.join(self.url, url)
    try:
        response = yield treq.get(url, headers=headers, timeout=5)
        if response.code != 200:
            err = '%s returned %s' % (url, response.code)
            raise ProductPagesException(err)
        else:
            content = yield treq.json_content(response)
            defer.returnValue(content)
    except ProductPagesException:
        # BUG FIX: re-raise our own non-200 error unchanged instead of letting
        # the broad handler below double-wrap it as a "treq error".
        raise
    except Exception as e:
        # For example, if treq.get() timed out, or if treq.json_content()
        # could not parse the JSON, etc.
        # TODO: better handling here for the specific errors?
        # BUG FIX: Exception has no .message attribute on Python 3 (and it was
        # deprecated on Python 2); use str(e) via %-formatting instead.
        raise ProductPagesException('treq error: %s' % e)
|
def fit_for_distance(self):
    """``True`` if any of the properties are apparent magnitudes
    (i.e. appear in ``self.ic.bands``)."""
    return any(prop in self.ic.bands for prop in self.properties)
|
def get_timestamp(self, **kwargs):
    """Return the caller-supplied ``timestamp`` kwarg if truthy; otherwise the
    current UTC time as ``YYYY-MM-DDTHH:MM:SS.mmmZ`` (millisecond precision)."""
    supplied = kwargs.get('timestamp')
    if supplied:
        return supplied
    now = datetime.datetime.utcnow()
    # Millisecond precision with a literal 'Z' (UTC) suffix.
    return "%s.%03dZ" % (now.strftime("%Y-%m-%dT%H:%M:%S"), now.microsecond / 1000)
|
def _fit_spatial(noise, noise_temporal, mask, template, spatial_sd, temporal_sd, noise_dict, fit_thresh, fit_delta, iterations, ):
    """Fit the spatial noise magnitude so the simulated volume matches the target SNR.

    Iteratively recomputes the system (machine) noise with an adjusted spatial
    standard deviation until the volume's SNR is within ``fit_thresh`` of
    ``noise_dict['snr']``, or ``iterations`` attempts have been made.

    Parameters
    ----------
    noise : multidimensional array, float
        Initial estimate of the noise volume.
    noise_temporal : multidimensional array, float
        The temporal noise generated by _generate_temporal_noise.
    mask : 3d array, binary
        Mask of the brain volume, distinguishing brain from non-brain.
    template : 3d array, float
        Continuous (0 -> 1) volume describing the likelihood a voxel is in
        the brain.
    spatial_sd : float
        Standard deviation in space of the noise volume to be generated.
    temporal_sd : float
        Standard deviation in time of the noise volume to be generated.
    noise_dict : dict
        Noise specification; 'max_activity' scales the template into a mean
        signal and 'snr' is the target signal-to-noise ratio.
    fit_thresh : float
        Proportional error below which the fit is considered converged.
    fit_delta : float
        Proportion of the parameter difference applied on each update step.
    iterations : int
        Maximum number of SNR-fitting steps (usually converges in < 5).

    Returns
    -------
    noise : multidimensional array, float
        Noise volume regenerated with the fitted spatial_sd.
    spatial_sd : float
        The fitted spatial standard deviation.
    """
    # Pull out information that is needed
    dim_tr = noise.shape
    base = template * noise_dict['max_activity']
    base = base.reshape(dim_tr[0], dim_tr[1], dim_tr[2], 1)
    mean_signal = (base[mask > 0]).mean()
    target_snr = noise_dict['snr']
    # Iterate through different parameters to fit SNR and SFNR
    spat_sd_orig = np.copy(spatial_sd)
    converged = False
    for iteration in range(iterations):
        # Calculate the new metrics
        new_snr = _calc_snr(noise, mask)
        # Proportional difference between the real and simulated data
        diff_snr = abs(new_snr - target_snr) / target_snr
        # If the SNR is sufficiently close then stop fitting.
        if diff_snr < fit_thresh:
            logger.info('Terminated SNR fit after ' + str(iteration) + ' iterations.')
            converged = True
            break
        # Convert the SNR mismatch into a spatial-sd estimate.
        spat_sd_new = mean_signal / new_snr
        # Move spatial_sd a fit_delta-sized step towards the estimate.
        spatial_sd -= ((spat_sd_new - spat_sd_orig) * fit_delta)
        # Prevent these going out of range
        if spatial_sd < 0 or np.isnan(spatial_sd):
            spatial_sd = 10e-3
        # Set up the machine noise
        noise_system = _generate_noise_system(dimensions_tr=dim_tr, spatial_sd=spatial_sd, temporal_sd=temporal_sd, )
        # Sum up the noise of the brain
        noise = base + (noise_temporal * temporal_sd) + noise_system
        # Reject negative values (only happens outside of the brain)
        noise[noise < 0] = 0
    if iterations == 0:
        logger.info('No fitting iterations were run')
    elif not converged:
        # BUG FIX: the original compared the loop variable against
        # ``iterations``, which it can never reach (range() stops at
        # iterations - 1), so this warning was unreachable.
        logger.warning('SNR failed to converge.')
    # Return the updated noise
    return noise, spatial_sd
|
def cluster(data, inputs, verbose=False):
    """Clusters data

    Using the new offset model, this method uses a greedy algorithm to cluster
    the data. It starts with all the data points in separate clusters and tests
    whether combining them increases the overall log-likelihood (LL). It then
    iteratively joins pairs of clusters which cause the greatest increase in
    the LL, until no join increases the LL.

    arguments:
    data -- the 'Y's in a list, one item per cluster
    inputs -- the 'X's in a list, one item per cluster

    Both lists are modified in place as clusters are merged.

    returns a list of the clusters (each a list of original point indices).
    """
    N = len(data)
    # Define a set of N active clusters, one per data point.
    active = []
    for p in range(0, N):
        active.append([p])
    # Per-cluster log-likelihoods; NaN marks entries needing recomputation.
    loglikes = np.zeros(len(active))
    loglikes[:] = None
    # Pairwise merged log-likelihoods/offsets, lower triangle only (j < i).
    pairloglikes = np.zeros([len(active), len(active)])
    pairloglikes[:] = None
    pairoffset = np.zeros([len(active), len(active)])
    it = 0
    while True:
        if verbose:
            it += 1
            print("Iteration %d" % it)
        # Compute the log-likelihood of each cluster (add them together)
        for clusti in range(len(active)):
            if verbose:
                sys.stdout.write('.')
                sys.stdout.flush()
            if np.isnan(loglikes[clusti]):
                loglikes[clusti], unused_offset = get_log_likelihood_offset(inputs, data, [clusti])
            # try combining with each other cluster...
            for clustj in range(clusti):  # count from 0 to clusti-1
                temp = [clusti, clustj]
                if np.isnan(pairloglikes[clusti, clustj]):
                    pairloglikes[clusti, clustj], pairoffset[clusti, clustj] = get_log_likelihood_offset(inputs, data, temp)
        # LL of keeping each pair separate = sum of the two individual LLs.
        seploglikes = np.repeat(loglikes[:, None].T, len(loglikes), 0) + np.repeat(loglikes[:, None], len(loglikes), 1)
        # How much the likelihood improves with clustering each pair.
        loglikeimprovement = pairloglikes - seploglikes
        top = np.unravel_index(np.nanargmax(pairloglikes - seploglikes), pairloglikes.shape)
        # If there's further clustering to be done...
        if loglikeimprovement[top[0], top[1]] > 0:
            # Merge cluster top[1] into top[0], shifting its inputs by the
            # fitted offset.
            active[top[0]].extend(active[top[1]])
            offset = pairoffset[top[0], top[1]]
            inputs[top[0]] = np.vstack([inputs[top[0]], inputs[top[1]] - offset])
            data[top[0]] = np.hstack([data[top[0]], data[top[1]]])
            del inputs[top[1]]
            del data[top[1]]
            del active[top[1]]
            # NaN = message to say we need to recalculate entries involving
            # the merged cluster.
            pairloglikes[:, top[0]] = None
            pairloglikes[top[0], :] = None
            pairloglikes = np.delete(pairloglikes, top[1], 0)
            pairloglikes = np.delete(pairloglikes, top[1], 1)
            # BUG FIX: pairoffset must shrink in lockstep with pairloglikes;
            # the original left it at full size, so after the first merge its
            # indices drifted and stale offsets were applied to later merges.
            pairoffset = np.delete(pairoffset, top[1], 0)
            pairoffset = np.delete(pairoffset, top[1], 1)
            loglikes[top[0]] = None
            loglikes = np.delete(loglikes, top[1])
        else:
            break
    # TODO Add a way to return the offsets applied to all the time series
    return active
|
def get_media_metadata(self, item_id):
    """Get metadata for a media item.

    Args:
        item_id (str): The item for which metadata is required.

    Returns:
        ~collections.OrderedDict: The item's metadata, or `None`.

    See also:
        The Sonos `getMediaMetadata API
        <http://musicpartners.sonos.com/node/83>`_
    """
    result = self.soap_client.call('getMediaMetadata', [('id', item_id)])
    return result.get('getMediaMetadataResult')
|
def create_config_files(directory):
    """Initialize directory ready for vpn walker.

    Downloads the VPN Gate server list and writes one ``<ip>.ovpn`` config per
    server plus a ``servers.txt`` index of ``ip|country`` lines.

    NOTE(review): this function is Python-2-only as written
    (``StringIO.StringIO``, ``reader.next()``, str/bytes mixing with
    ``encode``/``b64decode``) -- port before running under Python 3.

    :param directory: the path where you want this to happen
    :return:
    """
    # Some constant strings
    vpn_gate_url = "http://www.vpngate.net/api/iphone/"
    if not os.path.exists(directory):
        os.makedirs(directory)
    # get csv into memory
    csv_str = ""
    logging.info("Downloading info from VPN Gate API...")
    r = requests.get(vpn_gate_url)
    for line in r.text.split('\n'):
        csv_str += line.encode('utf-8')
        csv_str += "\n"
    # convert csv string to string IO
    f = StringIO.StringIO(csv_str)
    # generate vpn dict
    vpn_dict = {}
    reader = csv.reader(f)
    # Skip the first two non-data lines of the feed (presumably a banner and
    # the column-header row -- verify against the live API).
    reader.next()
    reader.next()
    for row in reader:
        # Only well-formed data rows have 15 columns; anything else is skipped.
        if len(row) == 15:
            alpha2 = row[6]
            vpn_dict[alpha2] = vpn_dict.get(alpha2, [])
            # The last column is the base64-encoded OpenVPN config.
            vpn_dict[alpha2].append({"vpn_name": row[0], "ip": row[1], "country_name": row[5], "alpha2": alpha2, "openvpn_config": b64decode(row[-1])})
    f.close()
    server_country = {}
    # write config files
    for country in vpn_dict:
        for data in vpn_dict[country]:
            config_filename = "{}.ovpn".format(data['ip'])
            file_path = os.path.join(directory, config_filename)
            with open(file_path, 'w') as f:
                f.write(data['openvpn_config'])
                # Route DNS updates through the standard OpenVPN hooks.
                f.write("up /etc/openvpn/update-resolv-conf\n")
                f.write("down /etc/openvpn/update-resolv-conf\n")
            server_country[data['ip']] = country
    # Index file mapping each server IP to its country code.
    with open(os.path.join(directory, 'servers.txt'), 'w') as f:
        for ip in server_country:
            f.write('|'.join([ip, server_country[ip]]) + '\n')
|
def _lock ( self ) :
"""Lock the config DB ."""
|
if not self . locked :
self . device . cu . lock ( )
self . locked = True
|
def post_process(self, post_list, req, resp, params):
    """Post-process the extensions for the action.

    Each post-processing extension in ``post_list`` (generated by
    pre_process()) is given the current response; whenever one yields or
    returns a truthy value, that value (wrapped) becomes the response seen
    by subsequent extensions.

    Returns the final response.
    """
    for ext in post_list:
        if inspect.isgenerator(ext):
            # Resume the generator with the current response.
            try:
                outcome = ext.send(resp)
            except StopIteration:
                # Expected, but not required
                outcome = None
        else:
            outcome = ext(req, resp, **params)
        # A truthy outcome replaces the response for subsequent processing.
        if outcome:
            resp = self.wrap(req, outcome)
    return resp
|
def orthogonal(*args) -> bool:
    """Determine if a set of arrays are orthogonal.

    "Orthogonal" here means broadcasting-orthogonal: along every axis, at
    most one of the arrays may have a size greater than 1.

    Parameters
    ----------
    args : array-likes or array shapes

    Returns
    -------
    bool
        Array orthogonality condition.
    """
    # BUG FIX: ``args`` is a tuple, so the original in-place assignment
    # ``args[i] = arg.shape`` raised TypeError whenever an actual array
    # (rather than a bare shape) was passed; build a new list instead.
    shapes = [arg.shape if hasattr(arg, "shape") else arg for arg in args]
    for sizes in zip(*shapes):
        # All-but-one size must be 1, i.e. the product equals the maximum.
        # (np.prod replaces the np.product alias removed in NumPy 2.0.)
        if np.prod(sizes) != max(sizes):
            return False
    return True
|
def read(self, size=-1):
    """Read up to ``size`` bytes from the wrapped file, reporting the number
    of bytes actually read to the progress callback before returning them."""
    data = self._fd.read(size)
    self._progress_cb(len(data))
    return data
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.