signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
|---|---|
def backend(self):
    """Return the :class:`stdnet.BackendStructure` for this structure.

    Resolves the backend through the owning session: via the field's model
    when this structure belongs to a field, otherwise via the structure
    itself.  Returns None when no session is attached.
    """
    session = self.session
    if session is None:
        return None
    target = self._field.model if self._field else self
    return session.model(target).backend
|
def purge_stream(self, stream_id, remove_definition=False, sandbox=None):
    """Purge the stream.

    :param stream_id: The stream identifier
    :param remove_definition: Whether to remove the stream definition as well
    :param sandbox: The sandbox for this stream
    :return: None
    """
    # Delegate entirely to the parent channel implementation.
    parent = super(AssetsChannel, self)
    parent.purge_stream(stream_id=stream_id,
                        remove_definition=remove_definition,
                        sandbox=sandbox)
|
def findOrCreate(self, userItemClass, __ifnew=None, **attrs):
    """Usage::

        s.findOrCreate(userItemClass [, function] [, x=1, y=2, ...])

    Example::

        class YourItemType(Item):
            a = integer()
            b = text()
            c = integer()

        def f(x):
            print(x, "-- it's new!")

        s.findOrCreate(YourItemType, f, a=1, b=u'2')

    Search for an item with columns in the database that match the passed
    set of keyword arguments, returning the first match if one is found,
    creating one with the given attributes if not.  Takes an optional
    positional argument function to call on the new item if it is new.
    """
    andargs = []
    # .items() instead of Python-2-only .iteritems(); same keys/values
    for name, value in attrs.items():
        col = getattr(userItemClass, name)
        andargs.append(col == value)
    if not andargs:
        cond = []
    elif len(andargs) == 1:
        cond = [andargs[0]]
    else:
        cond = [attributes.AND(*andargs)]
    # Return the first existing match, if any.
    for result in self.query(userItemClass, *cond):
        return result
    # Nothing found: create, optionally notify, and return the new item.
    newItem = userItemClass(store=self, **attrs)
    if __ifnew is not None:
        __ifnew(newItem)
    return newItem
|
def absolute_path(string):
    """argparse type-callable: validate that *string* is an absolute path
    whose parent directory exists, and return it unchanged.

    :raises argparse.ArgumentTypeError: if the path is relative or its
        parent directory does not exist.
    """
    if not os.path.isabs(string):
        raise argparse.ArgumentTypeError(
            '{0!r} is not an absolute path'.format(string))
    # NOTE: only the parent directory is required to exist;
    # the leaf itself may be created later.
    if not os.path.exists(os.path.dirname(string)):
        raise argparse.ArgumentTypeError(
            'path {0!r} does not exist'.format(string))
    return string
|
def syncDependencies(self, recursive=False):
    """Sync the dependency items for this item to the view.

    :param recursive: when True, also sync all child items' dependencies.
    """
    scene = self.viewItem().scene()
    if not scene:
        return
    visible = self.viewItem().isVisible()
    # list() both sides: dict.values() is a view in Python 3 and cannot be
    # concatenated with `+=` as the original Python 2 code did
    dep_items = list(self._dependencies.values()) + \
        list(self._reverseDependencies.values())
    for dep_item in dep_items:
        if not dep_item.scene():
            scene.addItem(dep_item)
        dep_item.rebuild()
        dep_item.setVisible(visible)
    if recursive:
        for index in range(self.childCount()):
            self.child(index).syncDependencies(recursive=True)
|
def edit(text=None, editor=None, env=None, require_save=True, extension='.txt', filename=None):
    r"""Edits the given text in the defined editor.  If an editor is given
    (should be the full path to the executable but the regular operating
    system search path is used for finding the executable) it overrides
    the detected editor.  Optionally, some environment variables can be
    used.  If the editor is closed without changes, `None` is returned.  In
    case a file is edited directly the return value is always `None` and
    `require_save` and `extension` are ignored.

    If the editor cannot be opened a :exc:`UsageError` is raised.

    Note for Windows: to simplify cross-platform usage, the newlines are
    automatically converted from POSIX to Windows and vice versa.  As such,
    the message here will have ``\n`` as newline markers.

    :param text: the text to edit.
    :param editor: optionally the editor to use.  Defaults to automatic
                   detection.
    :param env: environment variables to forward to the editor.
    :param require_save: if this is true, then not saving in the editor
                         will make the return value become `None`.
    :param extension: the extension to tell the editor about.  This defaults
                      to `.txt` but changing this might change syntax
                      highlighting.
    :param filename: if provided it will edit this file instead of the
                     provided text contents.  It will not use a temporary
                     file as an indirection in that case.
    """
    from ._termui_impl import Editor
    ed = Editor(editor=editor, env=env,
                require_save=require_save, extension=extension)
    if filename is not None:
        # Direct file edit: no value to return.
        ed.edit_file(filename)
        return None
    return ed.edit(text)
|
def get_node(self, name, memory=False, binary=False):
    """Return an individual node in the RabbitMQ cluster.

    Set ``memory=True`` to get memory statistics, and ``binary=True`` to
    get a breakdown of binary memory use (may be expensive if there are
    many small binaries in the system).
    """
    query = dict(binary=binary, memory=memory)
    return self._api_get(url='/api/nodes/{0}'.format(name), params=query)
|
def run(configObj, wcsmap=None):
    """Interface for running `wdrizzle` from TEAL or Python command-line.

    This code performs all file ``I/O`` to set up the use of the drizzle
    code for a single exposure to replicate the functionality of the
    original `wdrizzle`.

    Parameters
    ----------
    configObj : dict-like
        Task parameter object (TEAL configObj) with all input/output and
        scaling settings.
    wcsmap : object, optional
        Custom WCS mapping object forwarded to ``do_driz``.
    """
    # Insure all output filenames specified have .fits extensions
    if configObj['outdata'][-5:] != '.fits':
        configObj['outdata'] += '.fits'
    if not util.is_blank(configObj['outweight']) and \
            configObj['outweight'][-5:] != '.fits':
        configObj['outweight'] += '.fits'
    if not util.is_blank(configObj['outcontext']) and \
            configObj['outcontext'][-5:] != '.fits':
        configObj['outcontext'] += '.fits'

    # Keep track of any files we need to open
    in_sci_handle = None
    in_wht_handle = None
    out_sci_handle = None
    out_wht_handle = None
    out_con_handle = None

    _wcskey = configObj['wcskey']
    if util.is_blank(_wcskey):
        _wcskey = ' '

    scale_pars = configObj['Data Scaling Parameters']
    user_wcs_pars = configObj['User WCS Parameters']

    # Open the SCI (and WHT?) image: read file to get science array
    insci = get_data(configObj['input'])
    expin = fileutil.getKeyword(configObj['input'], scale_pars['expkey'])
    in_sci_phdr = fits.getheader(fileutil.parseFilename(configObj['input'])[0],
                                 memmap=False)

    # we need to read in the input WCS
    input_wcs = stwcs.wcsutil.HSTWCS(configObj['input'], wcskey=_wcskey)

    if not util.is_blank(configObj['inweight']):
        inwht = get_data(configObj['inweight']).astype(np.float32)
    else:
        # Generate a default weight map of all good pixels
        inwht = np.ones(insci.shape, dtype=insci.dtype)

    output_exists = False
    outname = fileutil.osfn(fileutil.parseFilename(configObj['outdata'])[0])
    if os.path.exists(outname):
        output_exists = True

    # Output was specified as a filename, so open it in 'update' mode
    outsci = get_data(configObj['outdata'])

    if output_exists:
        # we also need to read in the output WCS from pre-existing output
        output_wcs = stwcs.wcsutil.HSTWCS(configObj['outdata'])
        out_sci_hdr = fits.getheader(outname, memmap=False)
        outexptime = out_sci_hdr['DRIZEXPT']
        if 'ndrizim' in out_sci_hdr:
            uniqid = out_sci_hdr['ndrizim'] + 1
        else:
            uniqid = 1
    else:
        # otherwise, define the output WCS either from user pars or refimage
        if util.is_blank(configObj['User WCS Parameters']['refimage']):
            # Define a WCS based on user provided WCS values
            # NOTE: All parameters must be specified, not just one or a few
            if not util.is_blank(user_wcs_pars['outscale']):
                output_wcs = wcs_functions.build_hstwcs(
                    user_wcs_pars['raref'], user_wcs_pars['decref'],
                    user_wcs_pars['xrefpix'], user_wcs_pars['yrefpix'],
                    user_wcs_pars['outnx'], user_wcs_pars['outny'],
                    user_wcs_pars['outscale'], user_wcs_pars['orient'])
            else:
                # Define default WCS based on input image
                applydist = True
                if input_wcs.sip is None or input_wcs.instrument == 'DEFAULT':
                    applydist = False
                output_wcs = stwcs.distortion.utils.output_wcs(
                    [input_wcs], undistort=applydist)
        else:
            refimage = configObj['User WCS Parameters']['refimage']
            refroot, extroot = fileutil.parseFilename(refimage)
            if extroot is None:
                # try to find extension with valid WCS
                fimg = fits.open(refroot, memmap=False)
                for i, extn in enumerate(fimg):
                    if 'CRVAL1' in extn.header:  # Key on CRVAL1 for valid WCS
                        refwcs = wcsutil.HSTWCS('{}[{}]'.format(refroot, i))
                        if refwcs.wcs.has_cd():
                            extroot = i
                            break
                fimg.close()
                refimage = "{}[{}]".format(refroot, extroot)
            # Define the output WCS based on a user specified reference image WCS
            output_wcs = stwcs.wcsutil.HSTWCS(refimage)
        # Initialize values used for combining results
        outexptime = 0.0
        uniqid = 1

    # Set up the output data array and insure that the units for that
    # array are 'cps'
    if outsci is None:
        # Define a default blank array based on definition of output_wcs
        outsci = np.empty(output_wcs.array_shape, dtype=np.float32)
        outsci.fill(np.nan)
    else:
        # Convert array to units of 'cps', if needed
        if outexptime != 0.0:
            np.divide(outsci, outexptime, outsci)
        outsci = outsci.astype(np.float32)

    # Now update output exposure time for additional input file
    outexptime += expin

    outwht = None
    if not util.is_blank(configObj['outweight']):
        outwht = get_data(configObj['outweight'])
    if outwht is None:
        outwht = np.zeros(output_wcs.array_shape, dtype=np.float32)
    else:
        outwht = outwht.astype(np.float32)

    outcon = None
    keep_con = False
    if not util.is_blank(configObj['outcontext']):
        outcon = get_data(configObj['outcontext'])
        keep_con = True
    if outcon is None:
        outcon = np.zeros((1,) + output_wcs.array_shape, dtype=np.int32)
    else:
        outcon = outcon.astype(np.int32)
    planeid = int((uniqid - 1) / 32)
    # Add a new plane to the context image if planeid overflows
    while outcon.shape[0] <= planeid:
        # BUGFIX: append a (1, ny, nx) plane; appending the 2-D
        # np.zeros_like(outcon[0]) along axis 0 of a 3-D array would make
        # np.append raise a dimension-mismatch error
        plane = np.zeros((1,) + outcon.shape[1:], dtype=outcon.dtype)
        outcon = np.append(outcon, plane, axis=0)

    # Interpret wt_scl parameter
    if configObj['wt_scl'] == 'exptime':
        wt_scl = expin
    elif configObj['wt_scl'] == 'expsq':
        wt_scl = expin * expin
    else:
        wt_scl = float(configObj['wt_scl'])

    # Interpret coeffs parameter to determine whether to apply coeffs or not
    undistort = True
    if not configObj['coeffs'] or input_wcs.sip is None or \
            input_wcs.instrument == 'DEFAULT':
        undistort = False
    # turn off use of coefficients if undistort is False (coeffs == False)
    if not undistort:
        input_wcs.sip = None
        input_wcs.cpdis1 = None
        input_wcs.cpdis2 = None
        input_wcs.det2im = None

    wcslin = distortion.utils.output_wcs([input_wcs], undistort=undistort)

    # Perform actual drizzling now...
    # BUGFIX: forward the caller-supplied wcsmap instead of hard-coding
    # wcsmap=None (the parameter was silently ignored before)
    _vers = do_driz(insci, input_wcs, inwht, output_wcs, outsci, outwht,
                    outcon, expin, scale_pars['in_units'], wt_scl,
                    wcslin_pscale=wcslin.pscale, uniqid=uniqid,
                    pixfrac=configObj['pixfrac'], kernel=configObj['kernel'],
                    fillval=scale_pars['fillval'],
                    stepsize=configObj['stepsize'], wcsmap=wcsmap)

    out_sci_handle, outextn = create_output(configObj['outdata'])
    if not output_exists:
        # Also, define default header based on input image Primary header
        out_sci_handle[outextn].header = in_sci_phdr.copy()

    hdr = out_sci_handle[outextn].header
    # Update header of output image with exptime used to scale the output
    # data: if out_units is not counts, this will simply be a value of 1.0;
    # the keyword 'exptime' will always contain the total exposure time of
    # all input images regardless of the output units
    hdr['EXPTIME'] = outexptime

    # create CTYPE strings
    ctype1 = input_wcs.wcs.ctype[0]
    ctype2 = input_wcs.wcs.ctype[1]
    # BUGFIX: str.find() returns -1 (truthy) when absent and 0 (falsy) when
    # the substring starts the string; test membership instead
    if '-SIP' in ctype1:
        ctype1 = ctype1.replace('-SIP', '')
    if '-SIP' in ctype2:
        ctype2 = ctype2.replace('-SIP', '')

    # Update header with WCS keywords
    hdr['ORIENTAT'] = output_wcs.orientat
    hdr['CD1_1'] = output_wcs.wcs.cd[0][0]
    hdr['CD1_2'] = output_wcs.wcs.cd[0][1]
    hdr['CD2_1'] = output_wcs.wcs.cd[1][0]
    hdr['CD2_2'] = output_wcs.wcs.cd[1][1]
    hdr['CRVAL1'] = output_wcs.wcs.crval[0]
    hdr['CRVAL2'] = output_wcs.wcs.crval[1]
    hdr['CRPIX1'] = output_wcs.wcs.crpix[0]
    hdr['CRPIX2'] = output_wcs.wcs.crpix[1]
    hdr['CTYPE1'] = ctype1
    hdr['CTYPE2'] = ctype2
    hdr['VAFACTOR'] = 1.0

    if scale_pars['out_units'] == 'counts':
        np.multiply(outsci, outexptime, outsci)
        hdr['DRIZEXPT'] = outexptime
    else:
        hdr['DRIZEXPT'] = 1.0

    # Update header keyword NDRIZIM to keep track of how many images have
    # been combined in this product so far
    hdr['NDRIZIM'] = uniqid

    # define keywords to be written out to product header
    drizdict = outputimage.DRIZ_KEYWORDS.copy()
    # Update drizdict with current values
    drizdict['VER']['value'] = _vers[:44]
    drizdict['DATA']['value'] = configObj['input'][:64]
    drizdict['DEXP']['value'] = expin
    drizdict['OUDA']['value'] = configObj['outdata'][:64]
    drizdict['OUWE']['value'] = configObj['outweight'][:64]
    drizdict['OUCO']['value'] = configObj['outcontext'][:64]
    drizdict['MASK']['value'] = configObj['inweight'][:64]
    drizdict['WTSC']['value'] = wt_scl
    drizdict['KERN']['value'] = configObj['kernel']
    drizdict['PIXF']['value'] = configObj['pixfrac']
    drizdict['OUUN']['value'] = scale_pars['out_units']
    drizdict['FVAL']['value'] = scale_pars['fillval']
    drizdict['WKEY']['value'] = configObj['wcskey']
    outputimage.writeDrizKeywords(hdr, uniqid, drizdict)

    # add output array to output file
    out_sci_handle[outextn].data = outsci
    # BUGFIX: snapshot the finished header BEFORE closing the handle; the
    # original read it back from the already-closed science handle when
    # writing the weight image below
    final_hdr = hdr.copy()
    out_sci_handle.close()

    if not util.is_blank(configObj['outweight']):
        out_wht_handle, outwhtext = create_output(configObj['outweight'])
        out_wht_handle[outwhtext].header = final_hdr.copy()
        out_wht_handle[outwhtext].data = outwht
        out_wht_handle.close()

    if keep_con:
        out_con_handle, outconext = create_output(configObj['outcontext'])
        out_con_handle[outconext].data = outcon
        out_con_handle.close()
|
def rsys2graph(rsys, fname, output_dir=None, prog=None, save=False, **kwargs):
    """Convenience function to call `rsys2dot`, write its output to file
    and render the graph.

    Parameters
    ----------
    rsys : ReactionSystem
    fname : str
        filename
    output_dir : str (optional)
        path to directory (default: temporary directory)
    prog : str (optional)
        default: 'dot'
    save : bool
        removes temporary directory if False (default: False); a path-like
        value copies the rendered file there instead
    \\*\\*kwargs :
        Keyword arguments passed along to py:func:`rsys2dot`.

    Returns
    -------
    str
        Outpath

    Examples
    --------
    >>> rsys2graph(rsys, sbstncs, '/tmp/out.png')  # doctest: +SKIP
    """
    dot_lines = rsys2dot(rsys, **kwargs)
    made_tempdir = False
    try:
        if output_dir is None:
            output_dir = tempfile.mkdtemp()
            made_tempdir = True
        stem, ext = os.path.splitext(os.path.basename(fname))
        outpath = os.path.join(output_dir, fname)
        dotpath = os.path.join(output_dir, stem + '.dot')
        with open(dotpath, 'wt') as dot_file:
            dot_file.writelines(dot_lines)
        # .tex output goes through dot2tex; anything else through dot with
        # the output format derived from the file extension
        if ext == '.tex':
            cmds = [prog or 'dot2tex']
        else:
            cmds = [prog or 'dot', '-T' + outpath.split('.')[-1]]
        proc = subprocess.Popen(cmds + [dotpath, '-o', outpath])
        exit_status = proc.wait()
        if exit_status:
            fmtstr = "{}\n returned with exit status {}"
            raise RuntimeError(fmtstr.format(' '.join(cmds), exit_status))
        return outpath
    finally:
        if save is True or save == 'True':
            pass
        elif save is False or save == 'False':
            if made_tempdir:
                shutil.rmtree(output_dir)
        else:
            # interpret save as path to copy the rendered file to
            shutil.copy(outpath, save)
|
def setWidth(self, typeID, width):
    """setWidth(string, double) -> None

    Sets the width in m of vehicles of this type.
    """
    self._connection._sendDoubleCmd(
        tc.CMD_SET_VEHICLETYPE_VARIABLE, tc.VAR_WIDTH, typeID, width)
|
def run_through(script, ensemble, roles=1, strict=False):
    """Cast an ensemble into a :py:class:`turberfield.dialogue.model.SceneScript`
    and yield the events of the resulting model.

    Yields nothing when the selection is unsatisfiable (or, under
    ``strict``, incomplete), or when casting/running the script fails.
    """
    with script as dialogue:
        selection = dialogue.select(ensemble, roles=roles)
        if not any(selection.values()) or strict and not all(selection.values()):
            return
        try:
            model = dialogue.cast(selection).run()
        except (AttributeError, ValueError) as e:
            log = logging.getLogger("turberfield.dialogue.player.run_through")
            # str() each arg: join() raises TypeError on non-string
            # exception args, and an empty args tuple falls back to the
            # exception itself (which join() could not iterate)
            log.warning(". ".join(str(arg) for arg in
                                  (getattr(e, "args", None) or (e,))))
            return
        else:
            yield from model
|
def join(self, queue_name, *, fail_fast=False, timeout=None):
    """Wait for all the messages on the given queue to be processed.

    This method is only meant to be used in tests to wait for all the
    messages in a queue to be processed.

    Raises:
      QueueJoinTimeout: When the timeout elapses.
      QueueNotFound: If the given queue was never declared.

    Parameters:
      queue_name(str): The queue to wait on.
      fail_fast(bool): When this is True and any message gets
        dead-lettered during the join, then an exception will be
        raised.  This will be True by default starting with
        version 2.0.
      timeout(Optional[int]): The max amount of time, in
        milliseconds, to wait on this queue.
    """
    try:
        # Watch both the queue itself and its delay queue.
        watched = [self.queues[queue_name], self.queues[dq_name(queue_name)]]
    except KeyError:
        raise QueueNotFound(queue_name)

    deadline = timeout and time.monotonic() + timeout / 1000
    while True:
        for queue in watched:
            remaining = deadline and deadline - time.monotonic()
            join_queue(queue, timeout=remaining)

        # We cycle through $queue then $queue.DQ then $queue again in
        # case the messages that were on the DQ got moved back on $queue.
        if not any(queue.unfinished_tasks for queue in watched):
            if fail_fast:
                for message in self.dead_letters_by_queue[queue_name]:
                    raise message._exception from None
            return
|
def find_any_reports(self, usage_page=0, usage_id=0):
    """Find any report type referencing a HID usage control/data item.

    Results are returned in a dictionary mapping report_type to usage
    lists; report types with no matching reports are omitted.
    """
    by_type = {
        HidP_Input: self.find_input_reports(usage_page, usage_id),
        HidP_Output: self.find_output_reports(usage_page, usage_id),
        HidP_Feature: self.find_feature_reports(usage_page, usage_id),
    }
    return {report_type: reports
            for report_type, reports in by_type.items() if reports}
|
def get_lastblock(cls, impl, working_dir):
    """What was the last block processed?

    Return the number on success.
    Return None on failure to read (no DB, or no rows).
    """
    if not cls.db_exists(impl, working_dir):
        return None
    con = cls.db_open(impl, working_dir)
    try:
        query = 'SELECT MAX(block_id) FROM snapshots;'
        rows = cls.db_query_execute(con, query, (), verbose=False)
        ret = None
        for r in rows:
            ret = r['MAX(block_id)']
        return ret
    finally:
        # always release the connection, even if the query raises
        con.close()
|
def report_bar(bytes_so_far, total_size, speed, eta):
    """Download callback: render a one-line progress bar to stdout."""
    percent = int(bytes_so_far * 100 / total_size)
    done = approximate_size(bytes_so_far).center(9)
    full = approximate_size(total_size).center(9)
    shaded = int(float(bytes_so_far) / total_size * AVAIL_WIDTH)
    line = " {0}% [{1}{2}{3}] {4}/{5} {6} eta{7}".format(
        str(percent).center(4),
        '=' * (shaded - 1),
        '>',
        ' ' * (AVAIL_WIDTH - shaded),
        done,
        full,
        (approximate_size(speed) + '/s').center(11),
        eta.center(10),
    )
    sys.stdout.write(line)
    # carriage return so the next call overwrites this line
    sys.stdout.write("\r")
    sys.stdout.flush()
|
def peek(self):
    """Return (without consuming) the oldest reading in this virtual stream.

    Raises StreamEmptyError when the walker holds no data.
    """
    reading = self.reading
    if reading is None:
        raise StreamEmptyError(
            "peek called on virtual stream walker without any data",
            selector=self.selector)
    return reading
|
def _get_class_name(error_code):
    """Map an RPC error code to its error class name.

    An ``int`` code resolves through ``KNOWN_BASE_CLASSES`` (falling back
    to a generated ``RPCError<code>`` name, with ``-`` spelled ``Neg``);
    a ``str`` code is converted to CamelCase with an ``Error`` suffix.
    """
    if not isinstance(error_code, int):
        return snake_to_camel_case(
            error_code.replace('FIRSTNAME', 'FIRST_NAME').lower(),
            suffix='Error')
    fallback = 'RPCError' + str(error_code).replace('-', 'Neg')
    return KNOWN_BASE_CLASSES.get(error_code, fallback)
|
def rightClick(x=None, y=None, duration=0.0, tween=linear, pause=None, _pause=True):
    """Performs a right mouse button click.

    This is a wrapper function for click('right', x, y).

    The x and y parameters detail where the mouse event happens.  If None,
    the current mouse position is used.  If a float value, it is rounded
    down.  If outside the boundaries of the screen, the event happens at
    the edge of the screen.

    Args:
      x (int, float, None, tuple, optional): The x position on the screen
        where the click happens.  None by default.  If tuple, this is used
        for x and y.  If x is a str, it's considered a filename of an image
        to find on the screen with locateOnScreen() and click the center of.
      y (int, float, None, optional): The y position on the screen where
        the click happens.  None by default.

    Returns:
      None

    NOTE(review): ``duration`` and ``tween`` are accepted but not
    forwarded to click() here — confirm whether that is intentional.
    """
    _failSafeCheck()
    click(x, y, 1, 0.0, 'right', _pause=False)
    _autoPause(pause, _pause)
|
def histogram_pb(tag, data, buckets=None, description=None):
    """Create a histogram summary protobuf.

    Arguments:
      tag: String tag for the summary.
      data: A `np.array` or array-like form of any shape.  Must have type
        castable to `float`.
      buckets: Optional positive `int`.  The output will have this
        many buckets, except in two edge cases.  If there is no data, then
        there are no buckets.  If there is data but all points have the
        same value, then there is one bucket whose left and right
        endpoints are the same.
      description: Optional long-form description for this summary, as a
        `str`.  Markdown is supported.  Defaults to empty.

    Returns:
      A `summary_pb2.Summary` protobuf object.
    """
    bucket_count = DEFAULT_BUCKET_COUNT if buckets is None else buckets
    data = np.array(data).flatten().astype(float)
    if data.size == 0:
        # no data -> no buckets; shape (0, 3) keeps the tensor layout
        buckets = np.array([]).reshape((0, 3))
    else:
        min_ = np.min(data)
        max_ = np.max(data)
        range_ = max_ - min_
        if range_ == 0:
            # all values identical: a single unit-width bucket around them
            center = min_
            buckets = np.array([[center - 0.5, center + 0.5, float(data.size)]])
        else:
            bucket_width = range_ / bucket_count
            offsets = data - min_
            bucket_indices = np.floor(offsets / bucket_width).astype(int)
            # the max value would land in bucket_count; fold into last bucket
            clamped_indices = np.minimum(bucket_indices, bucket_count - 1)
            # PERF: bincount is O(n + k); the previous one-hot matrix sum
            # allocated an (n, k) boolean array, i.e. O(n * k) time/space
            bucket_counts = np.bincount(clamped_indices, minlength=bucket_count)
            edges = np.linspace(min_, max_, bucket_count + 1)
            left_edges = edges[:-1]
            right_edges = edges[1:]
            buckets = np.array([left_edges, right_edges, bucket_counts]).transpose()
    tensor = tensor_util.make_tensor_proto(buckets, dtype=np.float64)
    summary_metadata = metadata.create_summary_metadata(
        display_name=None, description=description)
    summary = summary_pb2.Summary()
    summary.value.add(tag=tag, metadata=summary_metadata, tensor=tensor)
    return summary
|
def objective_names(lang="en"):
    """Return the localized WvW objective names for the specified language.

    :param lang: The language to query the names for.
    :return: A dictionary mapping the objective Ids to the names.

    *Note that these are not the names displayed in the game, but rather
    the abstract type.*
    """
    params = {"lang": lang}
    cache_name = "objective_names.%(lang)s.json" % params
    data = get_cached("wvw/objective_names.json", cache_name, params=params)
    return {objective["id"]: objective["name"] for objective in data}
|
def launch_workflow(seqs_fp, working_dir, mean_error, error_dist,
                    indel_prob, indel_max, trim_length, left_trim_length,
                    min_size, ref_fp, ref_db_fp, threads_per_sample=1,
                    sim_thresh=None, coverage_thresh=None):
    """Launch the full deblur workflow for a single post-split-libraries
    fasta file.

    Parameters
    ----------
    seqs_fp : string
        a post split library fasta file for debluring
    working_dir : string
        working directory path
    mean_error : float
        mean error for original sequence estimate
    error_dist : list
        list of error probabilities for each hamming distance
    indel_prob : float
        insertion/deletion (indel) probability
    indel_max : integer
        maximal indel number
    trim_length : integer
        sequence trim length
    left_trim_length : integer
        trim the first n reads
    min_size : integer
        upper limit on sequence abundance (discard sequences below limit)
    ref_fp : tuple
        filepath(s) to FASTA reference database for artifact removal
    ref_db_fp : tuple
        filepath(s) to SortMeRNA indexed database for artifact removal
    threads_per_sample : integer, optional
        number of threads to use for SortMeRNA/mafft/vsearch
        (0 for max available)
    sim_thresh : float, optional
        the minimal similarity for a sequence to the database.
        if None, take the defaults (0.65 for negate=False,
        0.95 for negate=True)
    coverage_thresh : float, optional
        the minimal coverage for alignment of a sequence to the database.
        if None, take the defaults (0.3 for negate=False, 0.95 for negate=True)

    Returns
    -------
    output_no_chimeras_fp : string
        filepath to fasta file with no chimeras, or None if an error was
        encountered
    """
    logger = logging.getLogger(__name__)
    logger.info('--------------------------------------------------------')
    logger.info('launch_workflow for file %s' % seqs_fp)

    # Step 1: Trim sequences to specified length
    output_trim_fp = join(working_dir, "%s.trim" % basename(seqs_fp))
    with open(output_trim_fp, 'w') as out_f:
        for label, seq in trim_seqs(
                input_seqs=sequence_generator(seqs_fp),
                trim_len=trim_length,
                left_trim_len=left_trim_length):
            out_f.write(">%s\n%s\n" % (label, seq))

    # Step 2: Dereplicate sequences
    output_derep_fp = join(working_dir, "%s.derep" % basename(output_trim_fp))
    dereplicate_seqs(seqs_fp=output_trim_fp, output_fp=output_derep_fp,
                     min_size=min_size, threads=threads_per_sample)

    # Step 3: Remove artifacts
    # BUGFIX: forward coverage_thresh; it was accepted but never passed on
    output_artif_fp, num_seqs_left, _ = remove_artifacts_seqs(
        seqs_fp=output_derep_fp, ref_fp=ref_fp, working_dir=working_dir,
        ref_db_fp=ref_db_fp, negate=True, threads=threads_per_sample,
        sim_thresh=sim_thresh, coverage_thresh=coverage_thresh)
    if not output_artif_fp:
        warnings.warn('Problem removing artifacts from file %s' % seqs_fp,
                      UserWarning)
        logger.warning('remove artifacts failed, aborting')
        return None

    # Step 4: Multiple sequence alignment
    if num_seqs_left > 1:
        output_msa_fp = join(working_dir, "%s.msa" % basename(output_artif_fp))
        alignment = multiple_sequence_alignment(seqs_fp=output_artif_fp,
                                                threads=threads_per_sample)
        if not alignment:
            warnings.warn('Problem performing multiple sequence alignment '
                          'on file %s' % seqs_fp, UserWarning)
            logger.warning('msa failed. aborting')
            return None
    elif num_seqs_left == 1:
        # only one sequence after remove artifacts (but could be many reads):
        # no need to run MSA - just use the pre-msa file as input for next step
        output_msa_fp = output_artif_fp
    else:
        err_msg = ('No sequences left after artifact removal in '
                   'file %s' % seqs_fp)
        warnings.warn(err_msg, UserWarning)
        logger.warning(err_msg)
        return None

    # Step 5: Launch deblur
    output_deblur_fp = join(working_dir,
                            "%s.deblur" % basename(output_msa_fp))
    with open(output_deblur_fp, 'w') as f:
        seqs = deblur(sequence_generator(output_msa_fp), mean_error,
                      error_dist, indel_prob, indel_max)
        if seqs is None:
            warnings.warn('multiple sequence alignment file %s contains '
                          'no sequences' % output_msa_fp, UserWarning)
            # logger.warn() is deprecated; use logger.warning()
            logger.warning('no sequences returned from deblur for file %s'
                           % output_msa_fp)
            return None
        for s in seqs:
            # remove '-' gap characters from aligned sequences
            s.sequence = s.sequence.replace('-', '')
            f.write(s.to_fasta())

    # Step 6: Chimera removal
    output_no_chimeras_fp = remove_chimeras_denovo_from_seqs(
        output_deblur_fp, working_dir, threads=threads_per_sample)
    logger.info('finished processing file')
    return output_no_chimeras_fp
|
def make_absolute(base, relative):
    """Make the given (possibly relative) URL absolute.

    Args:
      base (str): The absolute URL the relative url was found on.
      relative (str): The (possibly relative) url to make absolute.

    Returns:
      str: The absolute URL.
    """
    # Python 3.4 and lower do not remove folder traversal strings.
    # This was fixed in 3.5 (https://docs.python.org/3/whatsnew/3.5.html#urllib)
    # so strip leading traversal segments up front for uniform behaviour.
    while relative.startswith(('/../', '../')):
        relative = relative[3:]
    parsed = urlparse(base)
    trimmed_path = parsed.path.rsplit('/', 1)[0]
    base = parsed._replace(path=trimmed_path).geturl()
    return urljoin(base, relative)
|
def _validate_ding0_mv_grid_import(grid, ding0_grid):
    """Verify imported MV grid data against the original data from Ding0.

    Parameters
    ----------
    grid : MVGrid
        MV Grid data (eDisGo)
    ding0_grid : ding0.MVGridDing0
        Ding0 MV grid object

    Notes
    -----
    The data validation excludes grid components located in aggregated load
    areas as these are represented differently in eDisGo.

    Returns
    -------
    dict
        Dict showing data integrity for each type of grid component
    """
    # 'line' check is intentionally disabled (see note below)
    checks = ['branch_tee', 'disconnection_point', 'mv_transformer',
              'lv_station']
    data_integrity = {check: {'ding0': None, 'edisgo': None, 'msg': None}
                      for check in checks}

    # Number of branch tees
    data_integrity['branch_tee']['ding0'] = len(ding0_grid._cable_distributors)
    data_integrity['branch_tee']['edisgo'] = len(
        grid.graph.nodes_by_attribute('branch_tee'))

    # Number of disconnecting points
    data_integrity['disconnection_point']['ding0'] = len(
        ding0_grid._circuit_breakers)
    data_integrity['disconnection_point']['edisgo'] = len(
        grid.graph.nodes_by_attribute('mv_disconnecting_point'))

    # Number of MV transformers
    data_integrity['mv_transformer']['ding0'] = len(
        list(ding0_grid.station().transformers()))
    data_integrity['mv_transformer']['edisgo'] = len(grid.station.transformers)

    # Number of LV stations in MV grid (graph), excluding stations in
    # aggregated load areas on the Ding0 side
    data_integrity['lv_station']['edisgo'] = len(
        grid.graph.nodes_by_attribute('lv_station'))
    data_integrity['lv_station']['ding0'] = len(
        [node for node in ding0_grid._graph.nodes()
         if (isinstance(node, LVStationDing0)
             and not node.grid.grid_district.lv_load_area.is_aggregated)])

    # NOTE: the line-count comparison outside aggregated load areas is
    # disabled; matching filters are needed on both sides before it can be
    # re-enabled.

    # raise an error if data does not match
    for check in checks:
        if data_integrity[check]['edisgo'] != data_integrity[check]['ding0']:
            raise ValueError(
                'Unequal number of objects for {c}. '
                '\n\tDing0:\t{ding0_no}'
                '\n\teDisGo:\t{edisgo_no}'.format(
                    c=check,
                    ding0_no=data_integrity[check]['ding0'],
                    edisgo_no=data_integrity[check]['edisgo']))
    return data_integrity
|
def status(self, **kwargs):
    """Retrieve the current status of this geo node.

    Args:
        **kwargs: Extra options to send to the server (e.g. sudo)

    Raises:
        GitlabAuthenticationError: If authentication is not correct
        GitlabGetError: If the server failed to perform the request

    Returns:
        dict: The status of the geo node
    """
    status_path = '/geo_nodes/%s/status' % self.get_id()
    return self.manager.gitlab.http_get(status_path, **kwargs)
|
def get_field_values_list(self, d):
    """Walk a (possibly nested) query dict and flatten it into a list of
    field/value descriptors of the following structure::

        {'field': 'some_field__iexact',
         'value': 'some_value',
         'value_from': 'optional_range_val1',
         'value_to': 'optional_range_val2',
         'negate': True}

    OR relations are expressed as an extra "line" between queries.
    """
    result = []
    negated = d.get('negated', False)
    child_nodes = d.get('children', [])
    for node in child_nodes:
        if isinstance(node, dict):
            # Nested sub-query: recurse and splice its rows in.
            result.extend(self.get_field_values_list(node))
        else:
            entry = {'field': node[0], 'value': node[1]}
            if self._is_range(node):
                entry['value_from'] = node[1][0]
                entry['value_to'] = node[1][1]
            entry['negate'] = negated
            result.append(entry)
        # Insert an _OR marker between (not after) OR-connected children.
        if d['connector'] == 'OR' and child_nodes[-1] != node:
            result.append({'field': '_OR', 'value': 'null'})
    return result
|
def _unsupported_message_type(self):
    """Return ``True`` when the current message's type is not among the
    configured message type(s).

    :rtype: bool
    """
    expected = self._message_type
    # The configuration may be a single type or a collection of types.
    if isinstance(expected, (tuple, list, set)):
        return self.message_type not in expected
    return self.message_type != expected
|
def _Bound_hs(h, s):
    """Identify the IAPWS-IF97 region for a given (h, s) state point.

    Parameters
    ----------
    h : float
        Specific enthalpy, [kJ/kg]
    s : float
        Specific entropy, [kJ/kgK]

    Returns
    -------
    region : float
        IAPWS-97 region code (1, 2, 3, 4 or 5), or None when the point
        lies outside the bounds of validity of the formulation.

    References
    ----------
    Wagner, W; Kretzschmar, H-J: International Steam Tables: Properties of
    Water and Steam Based on the Industrial Formulation IAPWS-IF97; Springer,
    2008; doi: 10.1007/978-3-540-74234-0. Fig. 2.14
    """
    region = None

    # Entropy at the corner points that partition the h-s plane.
    s13 = _Region1(623.15, 100)["s"]
    s13s = _Region1(623.15, Ps_623)["s"]
    sTPmax = _Region2(1073.15, 100)["s"]
    s2ab = _Region2(1073.15, 4)["s"]

    # Left point in h-s plot
    smin = _Region1(273.15, 100)["s"]
    hmin = _Region1(273.15, Pmin)["h"]

    # Right point in h-s plot
    _Pmax = _Region2(1073.15, Pmin)
    hmax = _Pmax["h"]
    smax = _Pmax["s"]

    # Region 4 left and right point (saturated liquid / vapor at Pmin)
    _sL = _Region1(273.15, Pmin)
    h4l = _sL["h"]
    s4l = _sL["s"]
    _sV = _Region2(273.15, Pmin)
    h4v = _sV["h"]
    s4v = _sV["s"]

    if smin <= s <= s13:
        # Liquid side: below the saturated-liquid line is region 4,
        # above (up to the 100 MPa isobar) is region 1.
        hmin = h4l + (s - s4l) / (s4v - s4l) * (h4v - h4l)
        hs = _h1_s(s)
        # Small offsets (-0.0218, and -0.0248 below) nudge the backward
        # evaluation inside its range of validity near the boundary.
        T = _Backward1_T_Ps(100, s) - 0.0218
        hmax = _Region1(T, 100)["h"]
        if hmin <= h < hs:
            region = 4
        elif hs <= h <= hmax:
            region = 1

    elif s13 < s <= s13s:
        # Transition band between region 1 and region 3.
        hmin = h4l + (s - s4l) / (s4v - s4l) * (h4v - h4l)
        hs = _h1_s(s)
        h13 = _h13_s(s)
        v = _Backward3_v_Ps(100, s) * (1 + 9.6e-5)
        T = _Backward3_T_Ps(100, s) - 0.0248
        hmax = _Region3(1 / v, T)["h"]
        if hmin <= h < hs:
            region = 4
        elif hs <= h < h13:
            region = 1
        elif h13 <= h <= hmax:
            region = 3

    elif s13s < s <= sc:
        # Left of the critical point: saturated boundary given by h3a(s).
        hmin = h4l + (s - s4l) / (s4v - s4l) * (h4v - h4l)
        hs = _h3a_s(s)
        v = _Backward3_v_Ps(100, s) * (1 + 9.6e-5)
        T = _Backward3_T_Ps(100, s) - 0.0248
        hmax = _Region3(1 / v, T)["h"]
        if hmin <= h < hs:
            region = 4
        elif hs <= h <= hmax:
            region = 3

    elif sc < s < 5.049096828:
        # Right of the critical point, still capped by region 3.
        hmin = h4l + (s - s4l) / (s4v - s4l) * (h4v - h4l)
        hs = _h2c3b_s(s)
        v = _Backward3_v_Ps(100, s) * (1 + 9.6e-5)
        T = _Backward3_T_Ps(100, s) - 0.0248
        hmax = _Region3(1 / v, T)["h"]
        if hmin <= h < hs:
            region = 4
        elif hs <= h <= hmax:
            region = 3

    elif 5.049096828 <= s < 5.260578707:
        # Specific zone with 2-3 boundary in s shape: the B23 line must be
        # checked explicitly to split regions 2 and 3.
        hmin = h4l + (s - s4l) / (s4v - s4l) * (h4v - h4l)
        hs = _h2c3b_s(s)
        h23max = _Region2(863.15, 100)["h"]
        h23min = _Region2(623.15, Ps_623)["h"]
        T = _Backward2_T_Ps(100, s) - 0.019
        hmax = _Region2(T, 100)["h"]
        if hmin <= h < hs:
            region = 4
        elif hs <= h < h23min:
            region = 3
        elif h23min <= h < h23max:
            if _Backward2c_P_hs(h, s) <= _P23_T(_t_hs(h, s)):
                region = 2
            else:
                region = 3
        elif h23max <= h <= hmax:
            region = 2

    elif 5.260578707 <= s < 5.85:
        # Vapor side, sub-region 2c boundary.
        hmin = h4l + (s - s4l) / (s4v - s4l) * (h4v - h4l)
        hs = _h2c3b_s(s)
        T = _Backward2_T_Ps(100, s) - 0.019
        hmax = _Region2(T, 100)["h"]
        if hmin <= h < hs:
            region = 4
        elif hs <= h <= hmax:
            region = 2

    elif 5.85 <= s < sTPmax:
        # Vapor side, sub-region 2a/2b boundary.
        hmin = h4l + (s - s4l) / (s4v - s4l) * (h4v - h4l)
        hs = _h2ab_s(s)
        T = _Backward2_T_Ps(100, s) - 0.019
        hmax = _Region2(T, 100)["h"]
        if hmin <= h < hs:
            region = 4
        elif hs <= h <= hmax:
            region = 2

    elif sTPmax <= s < s2ab:
        # Upper bound is now the 1073.15 K isotherm, not the 100 MPa isobar.
        hmin = h4l + (s - s4l) / (s4v - s4l) * (h4v - h4l)
        hs = _h2ab_s(s)
        P = _Backward2_P_hs(h, s)
        hmax = _Region2(1073.15, P)["h"]
        if hmin <= h < hs:
            region = 4
        elif hs <= h <= hmax:
            region = 2

    elif s2ab <= s < s4v:
        hmin = h4l + (s - s4l) / (s4v - s4l) * (h4v - h4l)
        hs = _h2ab_s(s)
        P = _Backward2_P_hs(h, s)
        hmax = _Region2(1073.15, P)["h"]
        if hmin <= h < hs:
            region = 4
        elif hs <= h <= hmax:
            region = 2

    elif s4v <= s <= smax:
        # Entirely superheated vapor band.
        hmin = _Region2(273.15, Pmin)["h"]
        P = _Backward2a_P_hs(h, s)
        hmax = _Region2(1073.15, P)["h"]
        if Pmin <= P <= 100 and hmin <= h <= hmax:
            region = 2

    # Check region 5 (high-temperature steam) only if nothing matched above.
    if not region and _Region5(1073.15, 50)["s"] < s <= _Region5(2273.15, Pmin)["s"] and _Region5(1073.15, 50)["h"] < h <= _Region5(2273.15, Pmin)["h"]:
        # Solve (T, P) so that region-5 h and s match the inputs.
        def funcion(par):
            return (_Region5(par[0], par[1])["h"] - h, _Region5(par[0], par[1])["s"] - s)
        T, P = fsolve(funcion, [1400, 1])
        if 1073.15 < T <= 2273.15 and Pmin <= P <= 50:
            region = 5
    return region
|
def generate_sections(self):
    """Return all hubs, slugs, and upload counts."""
    sections = []
    # Aggregate uploads per hub slug, busiest hubs first.
    hub_counts = (Dataset.objects
                  .values('hub_slug')
                  .annotate(upload_count=Count('hub_slug'))
                  .order_by('-upload_count'))
    for row in hub_counts:
        sections.append({
            'count': row['upload_count'],
            'name': get_hub_name_from_slug(row['hub_slug']),
            'slug': row['hub_slug'],
        })
    return sections
|
def complement(self, frame_bound=None):
    r"""Return the filter that makes the frame tight.

    The complementary filter is designed such that the union of a filter
    bank and its complementary filter forms a tight frame.

    Parameters
    ----------
    frame_bound : float or None
        The desired frame bound :math:`A = B` of the resulting tight frame.
        The chosen bound should be larger than the sum of squared
        evaluations of all filters in the filter bank. If None (the
        default), the method chooses the smallest feasible bound.

    Returns
    -------
    complement : Filter
        The complementary filter.

    See Also
    --------
    estimate_frame_bounds : estimate the frame bounds
    """
    def kernel(x, *args, **kwargs):
        # Sum of squared evaluations of every filter in the bank at x.
        squared = self.evaluate(x)
        np.power(squared, 2, out=squared)
        total = np.sum(squared, axis=0)
        if frame_bound is None:
            # Smallest feasible bound.
            bound = total.max()
        elif total.max() > frame_bound:
            raise ValueError('The chosen bound is not feasible. '
                             'Choose at least {}.'.format(total.max()))
        else:
            bound = frame_bound
        # The complement tops every frequency up to the chosen bound.
        return np.sqrt(bound - total)

    return Filter(self.G, kernel)
|
def _print_summary(case, summary):
    """Show some statistics from the run."""
    for dof, data in summary.items():
        bit_for_bit = data["Bit for Bit"]
        config = data["Configurations"]
        parsed = data["Std. Out Files"]
        # One stanza per degree of freedom.
        print(" " + case + " " + str(dof))
        print(" --------------------")
        print(" Bit for bit matches : " + str(bit_for_bit[0]) + " of " + str(bit_for_bit[1]))
        print(" Configuration matches : " + str(config[0]) + " of " + str(config[1]))
        print(" Std. Out files parsed : " + str(parsed))
        print("")
|
def generate_psk(self, security_key):
    """Generate and set a psk from the security key."""
    if not self._psk:
        # Remember the real identity before switching to the default one.
        real_psk_id = self._psk_id
        # The gateway only answers psk-generation requests made with the
        # default identity and the device's security key.
        self._psk_id = 'Client_identity'
        self._psk = security_key
        # Ask the Gateway to generate the psk for the real identity.
        self._psk = self.request(Gateway().generate_psk(real_psk_id))
        # Restore the real identity.
        self._psk_id = real_psk_id
    return self._psk
|
def run_sparser(pmid_list, tmp_dir, num_cores, start_index, end_index,
                force_read, force_fulltext, cleanup=True, verbose=True):
    """Run the sparser reader on the pmids in pmid_list.

    Parameters
    ----------
    pmid_list : list
        PMIDs to read.
    tmp_dir : str
        Directory used for temporary reading artifacts.
    num_cores : int
        Number of worker processes to use; capped at len(pmid_list).
    start_index, end_index : int
        Slice of the pmid list to process.
    force_read : bool
        Re-read content even if cached statements exist.
    force_fulltext : bool
        Require full text rather than abstracts.
    cleanup : bool
        Whether to remove intermediate files after reading.
    verbose : bool
        Currently unused here; kept for interface compatibility.

    Returns
    -------
    tuple
        (stmts, pmids_unread) where stmts maps pmid -> statements and
        pmids_unread maps the pmids that had to be (re)read.
    """
    reader_version = sparser.get_version()
    _, _, _, pmids_read, pmids_unread, _ = get_content_to_read(
        pmid_list, start_index, end_index, tmp_dir, num_cores,
        force_fulltext, force_read, 'sparser', reader_version)

    logger.info('Adjusting num cores to length of pmid_list.')
    num_cores = min(len(pmid_list), num_cores)
    logger.info('Adjusted...')
    if num_cores <= 1:
        # BUG FIX: this previously used `num_cores is 1`, which identity-
        # compares an int literal (implementation-defined), and left `stmts`
        # unbound when num_cores was 0 (empty pmid_list).
        stmts = get_stmts(pmids_unread, cleanup=cleanup)
        stmts.update({pmid: get_stmts_from_cache(pmid)[pmid]
                      for pmid in pmids_read.keys()})
    else:
        logger.info("Starting a pool with %d cores." % num_cores)
        pool = mp.Pool(num_cores)

        pmids_to_read = list(pmids_unread.keys())
        N = len(pmids_unread)
        dn = int(N / num_cores)
        logger.info("Breaking pmids into batches.")
        batches = []
        for i in range(num_cores):
            batches.append({k: pmids_unread[k]
                            for k in pmids_to_read[i * dn:min((i + 1) * dn, N)]})
        get_stmts_func = functools.partial(get_stmts, cleanup=cleanup,
                                           sparser_version=reader_version)
        logger.info("Mapping get_stmts onto pool.")
        unread_res = pool.map(get_stmts_func, batches)
        logger.info('len(unread_res)=%d' % len(unread_res))
        read_res = pool.map(get_stmts_from_cache, pmids_read.keys())
        logger.info('len(read_res)=%d' % len(read_res))
        pool.close()
        logger.info('Multiprocessing pool closed.')
        pool.join()
        logger.info('Multiprocessing pool joined.')
        stmts = {pmid: stmt_list
                 for res_dict in unread_res + read_res
                 for pmid, stmt_list in res_dict.items()}
        logger.info('len(stmts)=%d' % len(stmts))
    return (stmts, pmids_unread)
|
def get_answer(self, question):
    """Ask the user a question, then return their answer.

    :param question: Question: to ask user
    :return: User answer
    """
    # Remember the prompt so callers can inspect what was asked last.
    prompt = str(question).strip()
    self.last_question = prompt
    return input(prompt).strip()
|
def set_preferred_prefix_for_namespace(self, ns_uri, prefix, add_if_not_exist=False):
    """Sets the preferred prefix for ns_uri. If add_if_not_exist is True,
    the prefix is added if it's not already registered. Otherwise,
    setting an unknown prefix as preferred is an error. The default
    is False. Setting to None always works, and indicates a preference
    to use the namespace as a default. The given namespace must already
    be in this set.

    Args:
        ns_uri (str): the namespace URI whose prefix is to be set
        prefix (str): the preferred prefix to set
        add_if_not_exist (bool): Whether to add the prefix if it is not
            already set as a prefix of ``ns_uri``.

    Raises:
        NamespaceNotFoundError: If namespace ``ns_uri`` isn't in this set
            (presumably raised by the lookup helper — not visible here).
        PrefixNotFoundError: If ``prefix`` is not already registered for
            ``ns_uri`` and ``add_if_not_exist`` is False.
        DuplicatePrefixError: If ``prefix`` already maps to a different
            namespace (presumably raised by ``add_prefix`` — confirm).
    """
    ni = self.__lookup_uri(ns_uri)
    if not prefix:
        # A falsy prefix expresses a preference for using the namespace
        # as a default (no prefix at all).
        ni.preferred_prefix = None
    elif prefix in ni.prefixes:
        ni.preferred_prefix = prefix
    elif add_if_not_exist:
        # Register the new prefix and mark it preferred in one step.
        self.add_prefix(ns_uri, prefix, set_as_preferred=True)
    else:
        raise PrefixNotFoundError(prefix)
|
def add_source(self, filename, env_filename):
    """Add a source to the PEX environment.

    :param filename: The source filename to add to the PEX; None to create
        an empty file at `env_filename`.
    :param env_filename: The destination filename in the PEX. This path
        must be a relative path.
    """
    # Sources may only be added while the PEX is still being built.
    self._ensure_unfrozen('Adding source')
    self._copy_or_link(filename, env_filename, "source")
|
def calculate_megno(self):
    """Return the current MEGNO value.

    Note that you need to call init_megno() before the start of the simulation.
    """
    if self._calculate_megno == 0:
        raise RuntimeError("MEGNO cannot be calculated. Make sure to call init_megno() after adding all particles but before integrating the simulation.")
    # Declare the C return type before calling into librebound.
    megno_fn = clibrebound.reb_tools_calculate_megno
    megno_fn.restype = c_double
    return megno_fn(byref(self))
|
def add_time_step(self, **create_time_step_kwargs):
    """Create a time-step and append it to the list.

    Args:
        **create_time_step_kwargs: Forwarded to
            time_step.TimeStep.create_time_step.
    """
    new_step = time_step.TimeStep.create_time_step(**create_time_step_kwargs)
    # Guard against a factory returning the wrong type.
    assert isinstance(new_step, time_step.TimeStep)
    self._time_steps.append(new_step)
|
def moderate(self, environ, request, id, action, key):
    """Moderate a comment: activate, edit or delete it.

    The moderation link carries a signed comment id; the signature is
    verified before any action is taken.

    :param environ: WSGI environment (unused here).
    :param request: The incoming request object.
    :param id: Comment id from the URL (replaced by the signed value).
    :param action: One of "activate", "edit"; anything else deletes.
    :param key: Signed token carrying the comment id.
    :raises Forbidden: If the signature is invalid or expired.
    :raises NotFound: If no comment exists for the signed id.
    """
    try:
        # Never trust the raw URL id; use the id recovered from the
        # signed key instead.
        id = self.isso.unsign(key, max_age=2 ** 32)
    except (BadSignature, SignatureExpired):
        raise Forbidden

    item = self.comments.get(id)
    # BUG FIX: the None check must come before dereferencing `item`;
    # previously item['tid'] raised TypeError instead of returning 404.
    if item is None:
        raise NotFound

    thread = self.threads.get(item['tid'])
    link = local("origin") + thread["uri"] + "#isso-%i" % item["id"]

    if request.method == "GET":
        # Render a tiny confirmation page that POSTs back to this URL.
        modal = (
            "<!DOCTYPE html>"
            "<html>"
            "<head>"
            "<script>"
            " if (confirm('%s: Are you sure?')) {"
            " xhr = new XMLHttpRequest;"
            " xhr.open('POST', window.location.href);"
            " xhr.send(null);"
            " xhr.onload = function() {"
            " window.location.href = %s;"
            " };"
            " }"
            "</script>" % (action.capitalize(), json.dumps(link)))
        return Response(modal, 200, content_type="text/html")

    if action == "activate":
        if item['mode'] == 1:
            return Response("Already activated", 200)
        with self.isso.lock:
            self.comments.activate(id)
        self.signal("comments.activate", thread, item)
        return Response("Yo", 200)
    elif action == "edit":
        data = request.get_json()
        with self.isso.lock:
            rv = self.comments.update(id, data)
        # Strip non-public fields. Use a distinct loop variable so the
        # `key` parameter (the signed token) is not shadowed.
        for field in set(rv.keys()) - API.FIELDS:
            rv.pop(field)
        self.signal("comments.edit", rv)
        return JSON(rv, 200)
    else:
        # Any other action deletes the comment and its cached hash.
        with self.isso.lock:
            self.comments.delete(id)
        self.cache.delete('hash', (item['email'] or item['remote_addr']).encode('utf-8'))
        self.signal("comments.delete", id)
        return Response("Yo", 200)
"""@ api { get } / get comments
@ apiGroup Thread
@ apiDescription Queries the comments of a thread .
@ apiParam { string } uri
The URI of thread to get the comments from .
@ apiParam { number } [ parent ]
Return only comments that are children of the comment with the provided ID .
@ apiUse plainParam
@ apiParam { number } [ limit ]
The maximum number of returned top - level comments . Omit for unlimited results .
@ apiParam { number } [ nested _ limit ]
The maximum number of returned nested comments per comment . Omit for unlimited results .
@ apiParam { number } [ after ]
Includes only comments were added after the provided UNIX timestamp .
@ apiSuccess { number } total _ replies
The number of replies if the ` limit ` parameter was not set . If ` after ` is set to ` X ` , this is the number of comments that were created after ` X ` . So setting ` after ` may change this value !
@ apiSuccess { Object [ ] } replies
The list of comments . Each comment also has the ` total _ replies ` , ` replies ` , ` id ` and ` hidden _ replies ` properties to represent nested comments .
@ apiSuccess { number } id
Id of the comment ` replies ` is the list of replies of . ` null ` for the list of toplevel comments .
@ apiSuccess { number } hidden _ replies
The number of comments that were omitted from the results because of the ` limit ` request parameter . Usually , this will be ` total _ replies ` - ` limit ` .
@ apiExample { curl } Get 2 comments with 5 responses :
curl ' https : / / comments . example . com / ? uri = / thread / & limit = 2 & nested _ limit = 5'
@ apiSuccessExample Example response :
" total _ replies " : 14,
" replies " : [
" website " : null ,
" author " : null ,
" parent " : null ,
" created " : 1464818460.732863,
" text " : " & lt ; p & gt ; Hello , World ! & lt ; / p & gt ; " ,
" total _ replies " : 1,
" hidden _ replies " : 0,
" dislikes " : 2,
" modified " : null ,
" mode " : 1,
" replies " : [
" website " : null ,
" author " : null ,
" parent " : 1,
" created " : 1464818460.769638,
" text " : " & lt ; p & gt ; Hi , now some Markdown : & lt ; em & gt ; Italic & lt ; / em & gt ; , & lt ; strong & gt ; bold & lt ; / strong & gt ; , & lt ; code & gt ; monospace & lt ; / code & gt ; . & lt ; / p & gt ; " ,
" dislikes " : 0,
" modified " : null ,
" mode " : 1,
" hash " : " 2af4e1a6c96a " ,
" id " : 2,
" likes " : 2
" hash " : " 1cb6cc0309a2 " ,
" id " : 1,
" likes " : 2
" website " : null ,
" author " : null ,
" parent " : null ,
" created " : 1464818460.80574,
" text " : " & lt ; p & gt ; Lorem ipsum dolor sit amet , consectetur adipisicing elit . Accusantium at commodi cum deserunt dolore , error fugiat harum incidunt , ipsa ipsum mollitia nam provident rerum sapiente suscipit tempora vitae ? Est , qui ? & lt ; / p & gt ; " ,
" total _ replies " : 0,
" hidden _ replies " : 0,
" dislikes " : 0,
" modified " : null ,
" mode " : 1,
" replies " : [ ] ,
" hash " : " 1cb6cc0309a2 " ,
" id " : 3,
" likes " : 0
" id " : null ,
" hidden _ replies " : 12"""
| |
def getPlot(self, params):
    """Return a plot for the requested output.

    If this instance defines a method named after ``params['output_id']``,
    that method is called with ``params`` and its result is returned.
    Otherwise the data from ``getData`` is plotted directly.

    arguments:
        params (dict): must contain an 'output_id' key naming the output.

    returns:
        matplotlib.pyplot figure, or None when no data is available.
    """
    try:
        # SECURITY/BUG FIX: dispatch via getattr instead of eval(). eval on
        # a caller-supplied string allowed arbitrary code execution and
        # raised SyntaxError for ids that are not valid identifiers.
        return getattr(self, str(params['output_id']))(params)
    except AttributeError:
        # No dedicated plot method: fall back to plotting the raw data.
        df = self.getData(params)
        if df is None:
            return None
        return df.plot()
|
def license(self, license_id: str, token: dict = None, prot: str = "https") -> dict:
    """Get details about a specific license.

    :param str token: API auth token
    :param str license_id: license UUID
    :param str prot: https [DEFAULT] or http
        (use it only for dev and tracking needs).
    """
    # Request parameters
    payload = {"lid": license_id}
    # Build and issue the search request
    url = "{}://v1.{}.isogeo.com/licenses/{}".format(prot, self.api_url, license_id)
    response = self.get(
        url,
        headers=self.header,
        params=payload,
        proxies=self.proxies,
        verify=self.ssl,
    )
    # Validate the API response before handing it back
    checker.check_api_response(response)
    return response.json()
|
def com_google_fonts_check_metadata_unique_weight_style_pairs(family_metadata):
    """METADATA.pb: check if fonts field
    only contains unique style:weight pairs.
    """
    # IDIOM FIX: the original used a dict of dummy values as a set and then
    # wrapped its keys in a redundant set(); a set comprehension expresses
    # the de-duplication directly.
    pairs = {f"{f.style}:{f.weight}" for f in family_metadata.fonts}
    if len(pairs) != len(family_metadata.fonts):
        # Fewer unique pairs than fonts means at least one pair repeats.
        yield FAIL, ("Found duplicated style:weight pair"
                     " in METADATA.pb fonts field.")
    else:
        yield PASS, ("METADATA.pb \"fonts\" field only has"
                     " unique style:weight pairs.")
|
def _fix_squeeze(self, inputs, new_attr):
    """MXNet doesn't have a squeeze operator.
    Using "split" to perform a similar operation.
    "split" can be slower compared to "reshape".
    This can have performance impact.
    TODO: Remove this implementation once mxnet adds the support.
    """
    axes = new_attr.get('axis')
    # Squeeze the first requested axis, then each remaining one in turn.
    # NOTE(review): every later axis is offset by exactly one, which assumes
    # the axes are sorted ascending — confirm against callers.
    result = mx.sym.split(inputs[0], axis=axes[0], num_outputs=1, squeeze_axis=1)
    for axis in axes[1:]:
        result = mx.sym.split(result, axis=axis - 1, num_outputs=1, squeeze_axis=1)
    return result
|
def make_python_name(s, default=None, number_prefix='N', encoding="utf-8"):
    """Return a text string that can be used as a legal python identifier.

    :Arguments:
        s
            the input (converted with ``str`` if not already a string)
        *default*
            use *default* if *s* is ``None`` or empty
        *number_prefix*
            string to prepend if *s* starts with a digit
        *encoding*
            encoding used when decoding byte strings (Python 2 only)
    """
    if s in ('', None):
        s = default
    s = str(s)
    # Replace every character that is not legal inside an identifier.
    s = re.sub(r"[^a-zA-Z0-9_]", "_", s)
    # Identifiers must not start with a digit.
    # BUG FIX: the pattern was the non-raw string "\d" (invalid escape,
    # a SyntaxWarning/error on modern Python) tested via the awkward
    # `not re.match(...) is None`.
    if re.match(r"\d", s) is not None:
        s = number_prefix + s
    try:
        # Python 2: decode to a unicode object, as documented.
        return unicode(s, encoding)
    except NameError:
        # BUG FIX for Python 3: `unicode` does not exist; str is already
        # text, so return it as-is instead of crashing.
        return s
|
def update_refchip_with_shift(chip_wcs, wcslin, fitgeom='rscale',
                              rot=0.0, scale=1.0, xsh=0.0, ysh=0.0,
                              fit=None, xrms=None, yrms=None):
    """Apply a measured shift/rotation/scale fit to a chip's WCS.

    Parameters
    ----------
    chip_wcs : wcs object
        HST WCS of the input image (modified in place).
    wcslin : wcs object
        Reference WCS from which the offsets/rotations are determined.
    fitgeom : str
        NOT USED.
    rot : float
        Amount of rotation measured in fit to be applied.
        [Default=0.0]
    scale : float
        Amount of scale change measured in fit to be applied.
        [Default=1.0]
    xsh : float
        Offset in X pixels from defined tangent plane to be applied to image.
        [Default=0.0]
    ysh : float
        Offset in Y pixels from defined tangent plane to be applied to image.
        [Default=0.0]
    fit : arr
        Linear coefficients for fit; built from `rot`/`scale` when None.
        [Default=None]
    xrms : float
        RMS of fit in RA (in decimal degrees) that will be recorded as
        CRDER1 in WCS and header.
        [Default=None]
    yrms : float
        RMS of fit in Dec (in decimal degrees) that will be recorded as
        CRDER2 in WCS and header.
        [Default=None]
    """
    # Compute the matrix for the scale and rotation correction.
    if fit is None:
        fit = linearfit.buildFitMatrix(rot, scale)
    # Total shift expressed relative to the reference CRPIX.
    shift = np.asarray([xsh, ysh]) - np.dot(wcslin.wcs.crpix, fit) + wcslin.wcs.crpix
    # Invert the fit: we map from the reference frame back to the chip.
    fit = np.linalg.inv(fit).T
    cwcs = chip_wcs.deepcopy()

    # Identity CD perturbation and zero shift, in extended precision.
    cd_eye = np.eye(chip_wcs.wcs.cd.shape[0], dtype=ndfloat128)
    zero_shift = np.zeros(2, dtype=ndfloat128)
    naxis1, naxis2 = chip_wcs.pixel_shape

    # Estimate precision necessary for iterative processes:
    maxiter = 100
    crpix2corners = np.dstack([i.flatten() for i in np.meshgrid([1, naxis1], [1, naxis2])])[0] - chip_wcs.wcs.crpix
    maxUerr = 1.0e-5 / np.amax(np.linalg.norm(crpix2corners, axis=1))

    # Estimate step for numerical differentiation. We need a step
    # large enough to avoid rounding errors and small enough to get a
    # better precision for numerical differentiation.
    # TODO: The logic below should be revised at a later time so that it
    # better takes into account the two competing requirements.
    hx = max(1.0, min(20.0, (chip_wcs.wcs.crpix[0] - 1.0) / 100.0, (naxis1 - chip_wcs.wcs.crpix[0]) / 100.0))
    hy = max(1.0, min(20.0, (chip_wcs.wcs.crpix[1] - 1.0) / 100.0, (naxis2 - chip_wcs.wcs.crpix[1]) / 100.0))

    # Compute new CRVAL for the image WCS:
    crpixinref = wcslin.wcs_world2pix(chip_wcs.wcs_pix2world([chip_wcs.wcs.crpix], 1), 1)
    crpixinref = np.dot(fit, (crpixinref - shift).T).T
    chip_wcs.wcs.crval = wcslin.wcs_pix2world(crpixinref, 1)[0]
    chip_wcs.wcs.set()

    # Initial approximation for CD matrix of the image WCS:
    (U, u) = linearize(cwcs, chip_wcs, wcslin, chip_wcs.wcs.crpix, fit, shift, hx=hx, hy=hy)
    err0 = np.amax(np.abs(U - cd_eye)).astype(np.float64)
    chip_wcs.wcs.cd = np.dot(chip_wcs.wcs.cd.astype(ndfloat128), U).astype(np.float64)
    chip_wcs.wcs.set()

    # NOTE: initial solution is the exact mathematical solution (modulo numeric
    # differentiation). However, e.g., due to rounding errors, approximate
    # numerical differentiation, the solution may be improved by performing
    # several iterations. The next step will try to perform
    # fixed-point iterations to "improve" the solution
    # but this is not really required.

    # Perform fixed-point iterations to improve the approximation
    # for CD matrix of the image WCS (actually for the U matrix).
    for i in range(maxiter):
        (U, u) = linearize(chip_wcs, chip_wcs, wcslin, chip_wcs.wcs.crpix, cd_eye, zero_shift, hx=hx, hy=hy)
        err = np.amax(np.abs(U - cd_eye)).astype(np.float64)
        # Stop if the iteration starts diverging.
        if err > err0:
            break
        chip_wcs.wcs.cd = np.dot(chip_wcs.wcs.cd, U).astype(np.float64)
        chip_wcs.wcs.set()
        # Converged to within the target precision.
        if err < maxUerr:
            break
        err0 = err

    if xrms is not None:
        # Record the fit RMS as the WCS coordinate errors (CRDER1/CRDER2).
        chip_wcs.wcs.crder = np.array([xrms, yrms])
|
def check_upgrade_impact(system_image, kickstart_image=None, issu=True, **kwargs):
    '''Display upgrade impact information without actually upgrading the device.

    system_image (Mandatory Option)
        Path on bootflash: to system image upgrade file.

    kickstart_image
        Path on bootflash: to kickstart image upgrade file.
        (Not required if using combined system/kickstart image file)
        Default: None

    issu
        When True: Attempt In Service Software Upgrade. (non-disruptive)
            The upgrade will abort if issu is not possible.
        When False: Force (disruptive) Upgrade/Downgrade.
        Default: True

    timeout
        Timeout in seconds for long running 'install all' impact command.
        Default: 900

    error_pattern
        Use the option to pass in a regular expression to search for in the
        output of the 'install all impact' command that indicates an error
        has occurred.  This option is only used when proxy minion connection
        type is ssh and otherwise ignored.

    .. code-block:: bash

        salt 'n9k' nxos.check_upgrade_impact system_image=nxos.9.2.1.bin
        salt 'n7k' nxos.check_upgrade_impact system_image=n7000-s2-dk9.8.1.1.bin \
            kickstart_image=n7000-s2-kickstart.8.1.1.bin issu=False
    '''
    # Input Validation
    if not isinstance(issu, bool):
        return 'Input Error: The [issu] parameter must be either True or False'

    device = 'bootflash'
    cmd = 'terminal dont-ask ; show install all impact'
    # A separate kickstart image implies the dual-image command form.
    if kickstart_image is not None:
        cmd = cmd + ' kickstart {0}:{1} system {0}:{2}'.format(device, kickstart_image, system_image)
    else:
        cmd = cmd + ' nxos {0}:{1}'.format(device, system_image)
    if issu and kickstart_image is None:
        cmd = cmd + ' non-disruptive'
    log.info("Check upgrade impact using command: '%s'", cmd)

    # The impact command is long-running; default to a generous timeout.
    kwargs.setdefault('timeout', 900)
    kwargs['error_pattern'] = ['Another install procedure may be in progress',
                               'Pre-upgrade check failed']

    # Execute Upgrade Impact Check
    try:
        impact_check = __salt__['nxos.sendline'](cmd, **kwargs)
    except CommandExecutionError as e:
        impact_check = ast.literal_eval(e.message)
    return _parse_upgrade_data(impact_check)
|
def _agent_registration(self):
    """Register this agent with the server.

    This method registers the cfg agent with the neutron server so hosting
    devices can be assigned to it. In case the server is not ready to
    accept registration (it sends a False) then we retry registration
    for `MAX_REGISTRATION_ATTEMPTS` with a delay of
    `REGISTRATION_RETRY_DELAY`. If there is no server response or a
    failure to register after the required number of attempts,
    the agent stops itself.
    """
    for attempts in range(MAX_REGISTRATION_ATTEMPTS):
        context = bc.context.get_admin_context_without_session()
        # Report state before each registration attempt.
        self.send_agent_report(self.agent_state, context)
        try:
            res = self.devmgr_rpc.register_for_duty(context)
        except Exception:
            # Treat an RPC failure like a "not ready" response and retry.
            res = False
            LOG.warning("[Agent registration] Rpc exception. Neutron "
                        "may not be available or busy. Retrying "
                        "in %0.2f seconds ", REGISTRATION_RETRY_DELAY)
        if res is True:
            LOG.info("[Agent registration] Agent successfully registered")
            return
        elif res is False:
            # Server reachable but device manager not ready yet: back off.
            LOG.warning("[Agent registration] Neutron server said "
                        "that device manager was not ready. Retrying "
                        "in %0.2f seconds ", REGISTRATION_RETRY_DELAY)
            time.sleep(REGISTRATION_RETRY_DELAY)
        elif res is None:
            # No device manager exists at all: retrying cannot help.
            LOG.error("[Agent registration] Neutron server said that "
                      "no device manager was found. Cannot continue. "
                      "Exiting!")
            raise SystemExit(_("Cfg Agent exiting"))
    # All attempts exhausted without a successful registration.
    LOG.error("[Agent registration] %d unsuccessful registration "
              "attempts. Exiting!", MAX_REGISTRATION_ATTEMPTS)
    raise SystemExit(_("Cfg Agent exiting"))
|
def parse(self, argument):
    """Parse `argument` as a whitespace-separated list of strings.

    Commas are also accepted as separators when comma compatibility
    is enabled.

    Args:
        argument: string argument passed in the commandline.

    Returns:
        [str], the parsed flag value.
    """
    if isinstance(argument, list):
        # Already a parsed list; pass it through unchanged.
        return argument
    if not argument:
        return []
    if self._comma_compat:
        # Normalize commas to whitespace so one split handles both.
        argument = argument.replace(',', ' ')
    return argument.split()
|
def cli(ctx, organism="", sequence=""):
    """[UNTESTED] Get all of the sequence's alterations

    Output:

        A list of sequence alterations (?)
    """
    annotations = ctx.gi.annotations
    return annotations.get_sequence_alterations(organism=organism, sequence=sequence)
|
def _get_recursive_state(widget, store=None, drop_defaults=False):
    """Get the embed state of a widget, and of every widget it refers to."""
    if store is None:
        store = {}
    embed_state = widget._get_embed_state(drop_defaults=drop_defaults)
    store[widget.model_id] = embed_state
    # Follow each widget reference in the serialized state; skipping ids
    # already collected keeps the recursion finite on reference cycles.
    for referenced in _find_widget_refs_by_state(widget, embed_state['state']):
        if referenced.model_id not in store:
            _get_recursive_state(referenced, store, drop_defaults=drop_defaults)
    return store
|
def plot_emg_spect_freq(freq_axis, power_axis, max_freq, median_freq):
    """Brief
    A plot with the frequency power spectrum of the input EMG signal is
    presented graphically, highlighting maximum and median power frequency.

    Description
    Function intended to generate a single Bokeh figure graphically
    describing and identifying maximum and median power frequency on the
    Power Spectrum.

    Applied in the Notebook titled "EMG Analysis - Time and Frequency
    Parameters".

    Parameters
    ----------
    freq_axis : list
        List with the values of power spectrum frequency axis.
    power_axis : list
        List with the values of power spectrum y axis (relative weight of
        the frequency component on signal reconstruction).
    max_freq : float
        Frequency value registered when the maximum power is reached on
        the spectrum.
    median_freq : float
        Frequency value registered when half of the total power is reached
        on the cumulative power spectrum.
    """
    # List that stores the figure handler
    list_figures = []

    # Plotting of EMG Power Spectrum
    list_figures.append(figure(x_axis_label='Frequency (Hz)', y_axis_label='Relative Power (a.u.)', **opensignals_kwargs("figure")))
    list_figures[-1].line(freq_axis, power_axis, legend="Power Spectrum", **opensignals_kwargs("line"))
    # Filled polygon under the curve: the frequency axis forward then
    # reversed, with zeros closing the shape along the baseline.
    list_figures[-1].patch(list(freq_axis) + list(freq_axis)[::-1], list(power_axis) + list(numpy.zeros(len(power_axis))), fill_color=opensignals_color_pallet(), fill_alpha=0.5, line_alpha=0, legend="Area Under Curve")
    # Vertical markers at the median and maximum-power frequencies; the
    # marker height equals the spectrum value at that exact frequency.
    list_figures[-1].line([median_freq, median_freq], [0, power_axis[numpy.where(freq_axis == median_freq)[0][0]]], legend="Median Frequency", **opensignals_kwargs("line"))
    list_figures[-1].line([max_freq, max_freq], [0, power_axis[numpy.where(freq_axis == max_freq)[0][0]]], legend="Maximum Power Frequency", **opensignals_kwargs("line"))

    # Show figure.
    opensignals_style(list_figures)
    show(list_figures[-1])
|
def fdpf(self):
    """Fast Decoupled Power Flow.

    Alternates a P-theta (active power / voltage angle) half-iteration with a
    Q-V (reactive power / voltage magnitude) half-iteration, each solving
    against a constant, pre-factorized susceptance matrix (B' and B''), until
    the largest power mismatch falls below tolerance.

    Returns
    -------
    bool, int
        Success flag, number of iterations.
    """
    system = self.system
    # general settings
    self.niter = 1
    iter_max = self.config.maxit
    self.solved = True
    tol = self.config.tol
    error = tol + 1  # force at least one iteration of the main loop
    self.iter_mis = []  # per-iteration mismatch history (used for divergence test)
    # Build the fast-decoupled susceptance matrices if not yet available.
    if (not system.Line.Bp) or (not system.Line.Bpp):
        system.Line.build_b()
    # initialize indexing and Jacobian
    # ngen = system.SW.n + system.PV.n
    sw = system.SW.a
    sw.sort(reverse=True)  # pop from the end so earlier indices remain valid
    no_sw = system.Bus.a[:]   # bus angle variable indices, slack buses excluded
    no_swv = system.Bus.v[:]  # bus voltage variable indices, slack buses excluded
    for item in sw:
        no_sw.pop(item)
        no_swv.pop(item)
    gen = system.SW.a + system.PV.a
    gen.sort(reverse=True)
    no_g = system.Bus.a[:]   # bus indices with all generator buses excluded
    no_gv = system.Bus.v[:]
    for item in gen:
        no_g.pop(item)
        no_gv.pop(item)
    # Restrict B' / B'' to the relevant buses; factorize once, reuse per iteration.
    Bp = system.Line.Bp[no_sw, no_sw]
    Bpp = system.Line.Bpp[no_g, no_g]
    Fp = self.solver.symbolic(Bp)
    Fpp = self.solver.symbolic(Bpp)
    Np = self.solver.numeric(Bp, Fp)
    Npp = self.solver.numeric(Bpp, Fpp)
    # Evaluate the power-flow equations (dynamically generated code string).
    exec(system.call.fdpf)
    # main loop
    while error > tol:
        # P-theta half-iteration: solve for angle corrections and apply them.
        da = matrix(div(system.dae.g[no_sw], system.dae.y[no_swv]))
        self.solver.solve(Bp, Fp, Np, da)
        system.dae.y[no_sw] += da
        exec(system.call.fdpf)
        normP = max(abs(system.dae.g[no_sw]))
        # Q-V half-iteration: solve for voltage-magnitude corrections.
        dV = matrix(div(system.dae.g[no_gv], system.dae.y[no_gv]))
        self.solver.solve(Bpp, Fpp, Npp, dV)
        system.dae.y[no_gv] += dV
        exec(system.call.fdpf)
        normQ = max(abs(system.dae.g[no_gv]))
        err = max([normP, normQ])
        self.iter_mis.append(err)
        error = err
        self._iter_info(self.niter)
        self.niter += 1
        # Divergence check: mismatch blew up relative to the first iteration.
        if self.niter > 4 and self.iter_mis[-1] > 1000 * self.iter_mis[0]:
            logger.warning('Blown up in {0} iterations.'.format(self.niter))
            self.solved = False
            break
        if self.niter > iter_max:
            logger.warning('Reached maximum number of iterations.')
            self.solved = False
            break
    return self.solved, self.niter
|
def _can_for_object ( self , func_name , object_id , method_name ) :
"""Checks if agent can perform function for object"""
|
can_for_session = self . _can ( func_name )
if ( can_for_session or self . _object_catalog_session is None or self . _override_lookup_session is None ) :
return can_for_session
override_auths = self . _override_lookup_session . get_authorizations_for_agent_and_function ( self . get_effective_agent_id ( ) , self . _get_function_id ( func_name ) )
if not override_auths . available ( ) :
return False
if self . _object_catalog_session is not None :
catalog_ids = list ( getattr ( self . _object_catalog_session , method_name ) ( object_id ) )
for auth in override_auths :
if auth . get_qualifier_id ( ) in catalog_ids :
return True
return False
|
async def post(self, public_key):
    """Write a contents review to the blockchain and record it locally.

    Expects a JSON request body with a ``message`` field (either a JSON
    string or an object) containing ``cid``, ``review``, ``rating`` and
    ``coinid``. Responds with the created review data, or a 400 error
    payload when the input is malformed.
    """
    if settings.SIGNATURE_VERIFICATION:
        super().verify()
    try:
        body = json.loads(self.request.body)
    except (TypeError, ValueError):
        # Narrowed from a bare ``except:`` — only JSON decoding failures.
        self.set_status(400)
        self.write({"error": 400, "reason": "Unexpected data format. JSON required"})
        raise tornado.web.Finish
    raw_message = body.get("message")
    if isinstance(raw_message, str):
        message = json.loads(raw_message)
    elif isinstance(raw_message, dict):
        message = raw_message
    else:
        # Previously fell through with ``message`` unbound (NameError) when
        # the field was missing or of an unexpected type.
        self.set_status(400)
        self.write({"error": 400, "reason": "Unexpected data format. JSON required"})
        raise tornado.web.Finish
    cid = message.get("cid")
    review = message.get("review")
    rating = message.get("rating")
    coinid = message.get("coinid")
    if not all([cid, rating, review]):
        self.set_status(400)
        self.write({"error": 400, "reason": "Missed required fields"})
        # Previously missing: the handler kept executing with None fields.
        raise tornado.web.Finish
    if coinid in settings.bridges.keys():
        self.account.blockchain.setendpoint(settings.bridges[coinid])
    else:
        self.set_status(400)
        self.write({"error": 400, "reason": "Invalid coinid"})
        raise tornado.web.Finish
    buyer_address = self.account.validator[coinid](public_key)
    review = await self.account.blockchain.addreview(
        cid=int(cid), buyer_address=buyer_address, stars=int(rating), review=review)
    await self.account.setreview(cid=cid, txid=review["result"]["txid"], coinid=coinid)
    self.write({"cid": cid, "review": review, "rating": rating})
|
def explode(col):
    """Return a new row for each element in the given array or map.

    Uses the default column name `col` for elements in the array and
    `key` and `value` for elements in the map unless specified otherwise.

    >>> from pyspark.sql import Row
    >>> eDF = spark.createDataFrame([Row(a=1, intlist=[1, 2, 3], mapfield={"a": "b"})])
    >>> eDF.select(explode(eDF.intlist).alias("anInt")).collect()
    [Row(anInt=1), Row(anInt=2), Row(anInt=3)]

    >>> eDF.select(explode(eDF.mapfield).alias("key", "value")).show()
    |key|value|
    |  a|    b|
    """
    # Delegate to the JVM-side Spark SQL function of the same name.
    ctx = SparkContext._active_spark_context
    return Column(ctx._jvm.functions.explode(_to_java_column(col)))
|
def get_no_validate(self, key):
    """Return an item without validating the schema.

    Thunks are lazily evaluated in their captured environment before the
    value is handed back.
    """
    value, env = self.get_thunk_env(key)
    if isinstance(value, framework.Thunk):
        return framework.eval(value, env)
    return value
|
def convert(self, verbose=True):
    """Convert every ingested container from the input to the output format.

    :rtype: tuple
    :returns: Output containers, messages
    """
    reader = self._input_class(self._filename)
    writer = self._output_class()
    # Convert then validate each container, preserving input order.
    validated_containers = [
        writer.validate(self._convert_container(container, reader, writer))
        for container in reader.ingest_containers()
    ]
    return writer.emit_containers(validated_containers, verbose)
|
def from_dict(cls, d):
    """Build the complete DOS from its dict representation.

    Returns:
        CompleteDos object from dict representation.
    """
    total_dos = Dos.from_dict(d)
    structure = Structure.from_dict(d["structure"])
    pdoss = {}
    # One projected-DOS entry per site, keyed by the site object itself.
    for site_index, site_pdos in enumerate(d["pdos"]):
        site = structure[site_index]
        # Keys are orbital labels; values map spin channel -> densities.
        pdoss[site] = {
            orb_label: {Spin(int(spin)): dens
                        for spin, dens in odos["densities"].items()}
            for orb_label, odos in site_pdos.items()
        }
    return LobsterCompleteDos(structure, total_dos, pdoss)
|
def forwards(self, orm):
    """Perform a 'safe' load using Avocado's backup utilities."""
    from avocado.core import backup as avocado_backup
    # Restore the metadata fixture into the default database.
    avocado_backup.safe_load(u'0002_avocado_metadata', backup_path=None, using='default')
|
def _shuffle ( y , labels , random_state ) :
"""Return a shuffled copy of y eventually shuffle among same labels ."""
|
if labels is None :
ind = random_state . permutation ( len ( y ) )
else :
ind = np . arange ( len ( labels ) )
for label in np . unique ( labels ) :
this_mask = ( labels == label )
ind [ this_mask ] = random_state . permutation ( ind [ this_mask ] )
return y [ ind ]
|
def _transliterate ( self , text , outFormat ) :
"""Transliterate the text to the target transliteration scheme ."""
|
result = [ ]
for c in text :
if c . isspace ( ) :
result . append ( c )
try :
result . append ( self [ c ] . equivalents [ outFormat . name ] )
except KeyError :
result . append ( _unrecognised ( c ) )
return result
|
def get_all_integration(self, **kwargs):  # noqa: E501
    """Gets a flat list of all Wavefront integrations available, along with their status  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.get_all_integration(async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param int offset:
    :param int limit:
    :return: ResponseContainerPagedIntegration
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Both the sync and async paths delegate to the _with_http_info variant;
    # when async_req is set the call returns the request thread instead of
    # the response data, so a single return covers both branches.
    return self.get_all_integration_with_http_info(**kwargs)  # noqa: E501
|
def convert_conelp(c, G, h, dims, A=None, b=None, **kwargs):
    """Applies the clique conversion method of Fukuda et al. to the positive semidefinite blocks of a cone LP.

    :param c: :py:class:`matrix`
    :param G: :py:class:`spmatrix`
    :param h: :py:class:`matrix`
    :param dims: dictionary
    :param A: :py:class:`spmatrix` or :py:class:`matrix`
    :param b: :py:class:`matrix`

    The following example illustrates how to convert a cone LP:

    .. code-block:: python

        prob = (c, G, h, dims, A, b)
        probc, blk2sparse, symbs = convert_conelp(*prob)

    The return value `blk2sparse` is a list of 4-tuples
    (`blki, I, J, n`) that each defines a mapping between the sparse
    matrix representation and the converted block-diagonal
    representation, and `symbs` is a list of symbolic factorizations
    corresponding to each of the semidefinite blocks in the original cone LP.

    .. seealso::

        M. Fukuda, M. Kojima, K. Murota, and K. Nakata, `Exploiting Sparsity
        in Semidefinite Programming via Matrix Completion I: General Framework
        <http://dx.doi.org/10.1137/S1052623400366218>`_,
        SIAM Journal on Optimization, 11:3, 2001, pp. 647-674.

        S. Kim, M. Kojima, M. Mevissen, and M. Yamashita, `Exploiting Sparsity
        in Linear and Nonlinear Matrix Inequalities via Positive Semidefinite
        Matrix Completion <http://dx.doi.org/10.1007/s10107-010-0402-6>`_,
        Mathematical Programming, 129:1, 2011, pp.. 33-68.
    """
    # extract linear and socp constraints
    offsets = dims['l'] + sum(dims['q'])
    G_lq = G[:offsets, :]
    h_lq = h[:offsets, 0]
    # extract semidefinite blocks
    G_s = G[offsets:, :]
    h_s = h[offsets:, 0]
    # Converted pieces: the linear/SOCP part is carried over unchanged.
    G_converted = [G_lq];
    h_converted = [h_lq]
    G_coupling = []      # consistency (coupling) constraints generated per block
    dims_list = []       # dimensions of the converted SDP sub-blocks
    symbs = []           # symbolic factorizations, one per original SDP block
    offset = 0
    block_to_sparse = []
    for k, si in enumerate(dims['s']):
        # extract block (an si x si matrix stored as a length-si**2 column)
        G_b = G_s[offset:offset + si ** 2, :]
        h_b = h_s[offset:offset + si ** 2, 0]
        offset += si ** 2
        # convert block via clique decomposition
        blkk, b2s, F = convert_block(G_b, h_b, si, **kwargs)
        G1, h1, G2, blkdims = blkk
        G_converted.append(G1)
        h_converted.append(h1)
        dims_list.extend(blkdims)
        block_to_sparse.append(b2s)
        symbs.append(F)
        if G2 is not None:
            G_coupling.append(G2)
    G1 = sparse(G_converted)
    # Stack the per-block coupling constraints block-diagonally into one G2.
    I, J, V = [], [], []
    offset = [G_lq.size[0], 0]
    for Gcpl in G_coupling:
        I.append(Gcpl.I + offset[0])
        J.append(Gcpl.J + offset[1])
        V.append(Gcpl.V)
        offset[0] += Gcpl.size[0]
        offset[1] += Gcpl.size[1]
    G2 = spmatrix([v for v in itertools.chain(*V)],
                  [v for v in itertools.chain(*I)],
                  [v for v in itertools.chain(*J)], tuple(offset))
    if offset[0] == 0 or offset[1] == 0:
        # No coupling constraints were generated; keep the converted G as-is.
        G = G1
    else:
        G = sparse([[G1], [G2]])
    # Auxiliary variables introduced by the coupling columns get zero cost.
    ct = matrix([c, matrix(0.0, (G2.size[1], 1))])
    if A is not None:
        # Pad the equality constraints with zero columns for the auxiliary
        # variables. NOTE(review): this branch does not return ``symbs``,
        # unlike the A-is-None branch — confirm that is intentional.
        return (ct, G, matrix(h_converted),
                {'l': dims['l'], 'q': dims['q'], 's': dims_list},
                sparse([[A], [spmatrix([], [], [], (A.size[0], G2.size[1]))]]), b), block_to_sparse
    else:
        return (ct, G, matrix(h_converted),
                {'l': dims['l'], 'q': dims['q'], 's': dims_list}), block_to_sparse, symbs
|
def add(self, instance, modified=True, persistent=None, force_update=False):
    '''Add a new instance to this :class:`SessionModel`.

    :param modified: Optional flag indicating if the ``instance`` has been
        modified. By default its value is ``True``.
    :param persistent: Optional override for the instance's persistent flag;
        ``None`` means "use the instance's own state".
    :param force_update: if ``instance`` is persistent, it forces an update of the
        data rather than a full replacement. This is used by the
        :meth:`insert_update_replace` method.
    :rtype: The instance added to the session'''
    # Structures are handled by a dedicated code path.
    if instance._meta.type == 'structure':
        return self._add_structure(instance)
    state = instance.get_state()
    if state.deleted:
        raise ValueError('State is deleted. Cannot add.')
    # Drop any previous registration of this instance id in the session.
    self.pop(state.iid)
    pers = persistent if persistent is not None else state.persistent
    pkname = instance._meta.pkname()
    if not pers:
        # Not persistent: remove the primary key so the backend treats
        # this as an "add" action.
        instance._dbdata.pop(pkname, None)
        # to make sure it is add action
        state = instance.get_state(iid=None)
    elif persistent:
        # Explicitly persistent: key the state on the primary key value.
        instance._dbdata[pkname] = instance.pkvalue()
        state = instance.get_state(iid=instance.pkvalue())
    else:
        # NOTE(review): reached when ``persistent`` is None/falsy but the
        # instance's own state is persistent — confirm the asymmetry with
        # the ``elif persistent`` branch above is intentional.
        action = 'update' if force_update else None
        state = instance.get_state(action=action, iid=state.iid)
    iid = state.iid
    # Register the instance in the modified or new bucket depending on state.
    if state.persistent:
        if modified:
            self._modified[iid] = instance
    else:
        self._new[iid] = instance
    return instance
|
def assert_equal(self, v1, v2, **kwargs):
    """Check that ``v1`` equals ``v2`` and record the assertion result.

    Args:
        - desc (str): some description
        - safe (bool): will omit AssertionError if set to True
        - screenshot: can be type <None | True | False | PIL.Image>
    """
    matched = v1 == v2
    if matched:
        text = "assert equal success, %s == %s" % (v1, v2)
    else:
        text = '%s not equal %s' % (v1, v2)
    # Record the outcome through the shared assertion machinery.
    kwargs.update(message=text, success=matched)
    self._add_assert(**kwargs)
|
def saveProfile(self, key, settings=None):
    """Write the view settings to the persistent store.

    :param key: key under which the setting will be stored
    :param settings: optional QSettings object which can have a group already opened.
    """
    # logger.debug("Writing view settings for: {}".format(key))
    target = settings if settings is not None else QtCore.QSettings()
    target.setValue(key, self.horizontalHeader().saveState())
|
def send_activation_email(self, user, profile, password, site):
    """Send the account-activation e-mail containing the generated password.

    Renders the subject and body templates with the activation context and
    e-mails them to ``user``. Delivery failures are deliberately swallowed so
    a broken mail backend does not abort the registration flow.
    """
    ctx_dict = {
        'password': password,
        'site': site,
        'activation_key': profile.activation_key,
        'expiration_days': settings.ACCOUNT_ACTIVATION_DAYS,
    }
    subject = render_to_string('registration/email/emails/password_subject.txt', ctx_dict)
    # Email subject *must not* contain newlines
    subject = ''.join(subject.splitlines())
    message = render_to_string('registration/email/emails/password.txt', ctx_dict)
    try:
        user.email_user(subject, message, settings.DEFAULT_FROM_EMAIL)
    except Exception:
        # Best-effort delivery. Narrowed from a bare ``except:`` which also
        # swallowed SystemExit/KeyboardInterrupt.
        pass
|
def remove(self):
    """Interface to remove a migration request from the queue.

    Only permanent FAILED/9 and PENDING/0 requests can be removed
    (running and succeeded requests cannot be removed).
    """
    body = request.body.read()
    indata = cjson.decode(body)
    try:
        indata = validateJSONInputNoCopy("migration_rqst", indata)
        return self.dbsMigrate.removeMigrationRequest(indata)
    except dbsException as he:
        dbsExceptionHandler(he.eCode, he.message, self.logger.exception, he.message)
    except Exception as e:
        # Not every exception carries an HTTP ``code`` attribute; a plain
        # ``e.code`` lookup raised AttributeError inside this handler.
        if getattr(e, 'code', None) == 400:
            dbsExceptionHandler('dbsException-invalid-input2', str(e),
                                self.logger.exception, str(e))
        else:
            dbsExceptionHandler('dbsException-server-error',
                                dbsExceptionCode['dbsException-server-error'],
                                self.logger.exception, str(e))
|
def convert_nb(fname, dest_path='.'):
    "Convert a notebook `fname` to html file in `dest_path`."
    from .gen_notebooks import remove_undoc_cells, remove_code_cell_jupyter_widget_state_elem
    notebook = read_nb(fname)
    # Strip undocumented cells and stale widget-state metadata before export.
    notebook['cells'] = remove_undoc_cells(notebook['cells'])
    notebook['cells'] = remove_code_cell_jupyter_widget_state_elem(notebook['cells'])
    src = Path(fname).absolute()
    out_name = src.with_suffix('.html').name
    metadata = notebook['metadata']
    # Fall back to the notebook's base name when no jekyll metadata exists.
    if 'jekyll' in metadata:
        meta_jekyll = metadata['jekyll']
    else:
        meta_jekyll = {'title': src.with_suffix('').name}
    meta_jekyll['nb_path'] = f'{src.parent.name}/{src.name}'
    with open(f'{dest_path}/{out_name}', 'w') as out_file:
        out_file.write(exporter.from_notebook_node(notebook, resources=meta_jekyll)[0])
|
def push_notebook(document=None, state=None, handle=None):
    '''Update Bokeh plots in a Jupyter notebook output cell with new data
    or property values.

    When ``show`` was called with ``notebook_handle=True``, it returns a
    handle that can be passed here; any property or data-source updates made
    since the last push (or the original ``show``) are then applied to the
    previously rendered output cell.

    Args:
        document (Document, optional):
            A :class:`~bokeh.document.Document` to push from. If None,
            uses ``curdoc()``. (default: None)
        state (State, optional):
            A :class:`State` object. If None, then the current default
            state (set by ``output_file``, etc.) is used. (default: None)
        handle (optional):
            A comms handle previously returned by ``show(..., notebook_handle=True)``.
            If None, the last shown handle recorded on ``state`` is used.

    Returns:
        None

    Examples:
        .. code-block:: python

            from bokeh.plotting import figure
            from bokeh.io import output_notebook, push_notebook, show
            output_notebook()
            plot = figure()
            plot.circle([1,2,3], [4,6,5])
            handle = show(plot, notebook_handle=True)
            # Update the plot title in the earlier cell
            plot.title.text = "New Title"
            push_notebook(handle=handle)
    '''
    from ..protocol import Protocol
    if state is None:
        state = curstate()
    document = document or state.document
    if not document:
        warn("No document to push")
        return
    if handle is None:
        handle = state.last_comms_handle
    if not handle:
        warn("Cannot find a last shown plot to update. Call output_notebook() and show(..., notebook_handle=True) before push_notebook()")
        return
    events = list(handle.doc._held_events)
    # No new events since the last push: silently do nothing rather than
    # building an (invalid) empty PATCH-DOC message.
    if len(events) == 0:
        return
    handle.doc._held_events = []
    msg = Protocol("1.0").create("PATCH-DOC", events)
    # Send the message parts, then each binary buffer prefixed by its
    # JSON header.
    send = handle.comms.send
    send(msg.header_json)
    send(msg.metadata_json)
    send(msg.content_json)
    for header, payload in msg.buffers:
        send(json.dumps(header))
        send(buffers=[payload])
|
def convert_docx_to_text(filename: str = None, blob: bytes = None,
                         config: TextProcessingConfig = _DEFAULT_CONFIG) -> str:
    """Converts a DOCX file to text.

    Pass either a filename or a binary object.

    Args:
        filename: filename to process
        blob: binary ``bytes`` object to process
        config: :class:`TextProcessingConfig` control object

    Returns:
        text contents

    Notes:
        - Old ``docx`` (https://pypi.python.org/pypi/python-docx) has been
          superseded (see https://github.com/mikemaccana/python-docx);
          newer ``docx`` is python-docx, which uses ``lxml`` (C dependencies)
          and so doesn't always install on bare Windows machines. Its output
          has nice table formatting but groups tables at the end, not in
          document order.
        - ``docx2txt`` (https://pypi.python.org/pypi/docx2txt/0.6) is pure
          Python; all text comes out and table text is in a sensible place,
          but table formatting is lost, and it doesn't handle in-memory
          blobs properly.
        - Upshot: this is a DIY implementation that walks the XML parts of
          the DOCX archive directly.
    """
    # (The original body was wrapped in a vestigial ``if True:`` block,
    # removed here; behavior is unchanged.)
    text = ''
    with get_filelikeobject(filename, blob) as fp:
        for xml in gen_xml_files_from_docx(fp):
            text += docx_text_from_xml(xml, config)
    return text
|
def display_modules_list(self):
    """Print the sorted lists of available plugins and exporters."""
    plugins = ', '.join(sorted(self.stats.getPluginsList(enable=False)))
    exporters = ', '.join(sorted(self.stats.getExportsList(enable=False)))
    print("Plugins list: {}".format(plugins))
    print("Exporters list: {}".format(exporters))
|
def _make_request_with_auth_fallback(self, url, headers=None, params=None):
    """Generic request handler for OpenStack API requests.

    Raises specialized exceptions for commonly encountered error codes:
    401 triggers a scope reset for reauthentication, 409 raises
    ``InstancePowerOffFailure``, anything else re-raises the HTTP error.
    """
    self.log.debug("Request URL and Params: %s, %s", url, params)
    try:
        resp = requests.get(
            url,
            headers=headers,
            verify=self._ssl_verify,
            params=params,
            timeout=DEFAULT_API_REQUEST_TIMEOUT,
            proxies=self.proxy_config,
        )
        resp.raise_for_status()
    except requests.exceptions.HTTPError as e:
        self.log.debug("Error contacting openstack endpoint: %s", e)
        # Use the response attached to the exception: the local ``resp`` is
        # unbound when requests.get() itself raised the HTTPError.
        status_code = e.response.status_code if e.response is not None else None
        if status_code == 401:
            self.log.info('Need to reauthenticate before next check')
            # Delete the scope, we'll populate a new one on the next run for this instance
            self.delete_current_scope()
        elif status_code == 409:
            raise InstancePowerOffFailure()
        else:
            # Preserve the original traceback (previously ``raise e`` for 404).
            raise
    return resp.json()
|
def merge_entries_with_common_prefixes(list_, number_of_needed_commons=6):
    """Shorten runs of identically-prefixed entries to their common prefix.

    This might be useful in cases of several similar values, where the prefix
    is identical for several entries. Runs of fewer than
    ``number_of_needed_commons`` entries are kept unchanged.

    Example: ['test', 'pc1', 'pc2', 'pc3', ..., 'pc10'] -> ['test', 'pc*']
    """
    # First pass: group consecutive entries that share the same non-numeric prefix.
    runs = []
    current_prefix = None
    for entry in list_:
        entry_prefix, entry_number = split_string_at_suffix(entry, numbers_into_suffix=True)
        if entry == entry_prefix or entry_prefix != current_prefix:
            runs.append([])
            current_prefix = entry_prefix
        runs[-1].append((entry, entry_prefix, entry_number))
    # Second pass: emit either the original entries or the abbreviated prefix.
    merged = []
    for run in runs:
        run_prefix = run[0][1]
        assert all(run_prefix == prefix for entry, prefix, number in run)
        if len(run) <= number_of_needed_commons:
            merged.extend(entry for entry, prefix, number in run)
        else:
            # '*' marks several merged entries; '[min,max]' from the collected
            # numbers would be an alternative notation.
            merged.append(run_prefix + '*')
    return merged
|
def fetch_certs(certificate_list, user_agent=None, timeout=10):
    """Fetch issuer certificates referenced by a CRL's authority information
    access extension.

    :param certificate_list:
        An asn1crypto.crl.CertificateList object

    :param user_agent:
        The HTTP user agent to use when requesting the CRL. If None,
        a default is used in the format "certvalidation 1.0.0".

    :param timeout:
        The number of seconds after which an HTTP request should timeout

    :raises:
        urllib.error.URLError/urllib2.URLError - when a URL/HTTP error occurs
        socket.error - when a socket error occurs

    :return:
        A list of any asn1crypto.x509.Certificate objects that were fetched
    """
    fetched = []
    if user_agent is None:
        user_agent = 'certvalidator %s' % __version__
    elif not isinstance(user_agent, str_cls):
        raise TypeError('user_agent must be a unicode string, not %s' % type_name(user_agent))
    for url in certificate_list.issuer_cert_urls:
        req = Request(url)
        req.add_header('Accept', 'application/pkix-cert,application/pkcs7-mime')
        req.add_header('User-Agent', user_agent)
        response = urlopen(req, None, timeout)
        content_type = response.headers['Content-Type'].strip()
        response_data = response.read()
        if content_type == 'application/pkix-cert':
            # A single DER-encoded certificate.
            fetched.append(x509.Certificate.load(response_data))
        elif content_type == 'application/pkcs7-mime':
            # A PKCS#7 bundle possibly containing several certificates.
            signed_data = cms.SignedData.load(response_data)
            if isinstance(signed_data['certificates'], cms.CertificateSet):
                fetched.extend(
                    choice.chosen
                    for choice in signed_data['certificates']
                    if choice.name == 'certificate'
                )
        else:
            raise ValueError('Unknown content type of %s when fetching issuer certificate for CRL' % repr(content_type))
    return fetched
|
def readGlobalFile(self, fileStoreID, userPath=None, cache=True, mutable=False, symlink=False):
    """Downloads a file described by fileStoreID from the file store to the local directory.

    The function first looks for the file in the cache and if found, it hardlinks to the
    cached copy instead of downloading.

    The cache parameter will be used only if the file isn't already in the cache, and
    provided user path (if specified) is in the scope of local temp dir.

    :param fileStoreID: job-store identifier of the file to fetch.
    :param str userPath: optional destination path; must not already exist.
    :param bool cache: If True, a copy of the file will be saved into a cache that can be
        used by other workers. caching supports multiple concurrent workers requesting the
        same file by allowing only one to download the file while the others wait for it to
        complete.
    :param bool mutable: If True, the file path returned points to a file that is
        modifiable by the user. Using False is recommended as it saves disk by making
        multiple workers share a file via hard links. The default is False.
    :param bool symlink: accepted for interface compatibility; this implementation
        always passes ``symlink=False`` to the job store reads below.
    :return: the absolute local path of the downloaded (or linked) file.
    """
    # Check that the file hasn't been deleted by the user
    if fileStoreID in self.filesToDelete:
        raise RuntimeError('Trying to access a file in the jobStore you\'ve deleted: ' + '%s' % fileStoreID)
    # Get the name of the file as it would be in the cache
    cachedFileName = self.encodedFileID(fileStoreID)
    # setup the harbinger variable for the file. This is an identifier that the file is
    # currently being downloaded by another job and will be in the cache shortly. It is used
    # to prevent multiple jobs from simultaneously downloading the same file from the file
    # store.
    harbingerFile = self.HarbingerFile(self, cachedFileName=cachedFileName)
    # setup the output filename. If a name is provided, use it - This makes it a Named
    # Local File. If a name isn't provided, use the base64 encoded name such that we can
    # easily identify the files later on.
    if userPath is not None:
        localFilePath = self._resolveAbsoluteLocalPath(userPath)
        if os.path.exists(localFilePath):
            # yes, this is illegal now.
            raise RuntimeError(' File %s ' % localFilePath + ' exists. Cannot Overwrite.')
        fileIsLocal = True if localFilePath.startswith(self.localTempDir) else False
    else:
        localFilePath = self.getLocalTempFileName()
        fileIsLocal = True
    # First check whether the file is in cache. If it is, then hardlink the file to
    # userPath. Cache operations can only occur on local files.
    with self.cacheLock() as lockFileHandle:
        if fileIsLocal and self._fileIsCached(fileStoreID):
            logger.debug('CACHE: Cache hit on file with ID \'%s\'.' % fileStoreID)
            assert not os.path.exists(localFilePath)
            if mutable:
                # A mutable copy cannot share the cached inode: make a private
                # copy and record it in this job's state.
                shutil.copyfile(cachedFileName, localFilePath)
                cacheInfo = self._CacheState._load(self.cacheStateFile)
                jobState = self._JobState(cacheInfo.jobState[self.jobID])
                jobState.addToJobSpecFiles(fileStoreID, localFilePath, -1, None)
                cacheInfo.jobState[self.jobID] = jobState.__dict__
                cacheInfo.write(self.cacheStateFile)
            else:
                # Immutable: share the cached inode via a hard link.
                os.link(cachedFileName, localFilePath)
                self.returnFileSize(fileStoreID, localFilePath, lockFileHandle,
                                    fileAlreadyCached=True)
        # If the file is not in cache, check whether the .harbinger file for the given
        # FileStoreID exists. If it does, the wait and periodically check for the removal
        # of the file and the addition of the completed download into cache of the file by
        # the other job. Then we link to it.
        elif fileIsLocal and harbingerFile.exists():
            harbingerFile.waitOnDownload(lockFileHandle)
            # If the code reaches here, the harbinger file has been removed. This means
            # either the file was successfully downloaded and added to cache, or something
            # failed. To prevent code duplication, we recursively call readGlobalFile.
            flock(lockFileHandle, LOCK_UN)
            return self.readGlobalFile(fileStoreID, userPath=userPath, cache=cache,
                                       mutable=mutable)
        # If the file is not in cache, then download it to the userPath and then add to
        # cache if specified.
        else:
            logger.debug('CACHE: Cache miss on file with ID \'%s\'.' % fileStoreID)
            if fileIsLocal and cache:
                # If caching of the downloaded file is desired, First create the harbinger
                # file so other jobs know not to redundantly download the same file. Write
                # the PID of this process into the file so other jobs know who is carrying
                # out the download.
                harbingerFile.write()
                # Now release the file lock while the file is downloaded as download could
                # take a while.
                flock(lockFileHandle, LOCK_UN)
                # Use try:finally: so that the .harbinger file is removed whether the
                # download succeeds or not.
                try:
                    self.jobStore.readFile(fileStoreID,
                                           '/.'.join(os.path.split(cachedFileName)),
                                           symlink=False)
                except:
                    # Clean up the partial download before re-raising.
                    if os.path.exists('/.'.join(os.path.split(cachedFileName))):
                        os.remove('/.'.join(os.path.split(cachedFileName)))
                    raise
                else:
                    # If the download succeded, officially add the file to cache (by
                    # recording it in the cache lock file) if possible.
                    if os.path.exists('/.'.join(os.path.split(cachedFileName))):
                        os.rename('/.'.join(os.path.split(cachedFileName)), cachedFileName)
                        # If this is not true we get into trouble in our internal reference counting.
                        assert (os.stat(cachedFileName).st_nlink == self.nlinkThreshold)
                        self.addToCache(localFilePath, fileStoreID, 'read', mutable)
                        # We don't need to return the file size here because addToCache
                        # already does it for us
                finally:
                    # In any case, delete the harbinger file.
                    harbingerFile.delete()
            else:
                # Release the cache lock since the remaining stuff is not cache related.
                flock(lockFileHandle, LOCK_UN)
                self.jobStore.readFile(fileStoreID, localFilePath, symlink=False)
                # Make sure we got a file with the number of links we expect.
                # If this is not true we get into trouble in our internal reference counting.
                assert (os.stat(localFilePath).st_nlink == self.nlinkThreshold)
                os.chmod(localFilePath, stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH)
                # Now that we have the file, we have 2 options. It's modifiable or not.
                # Either way, we need to account for FileJobStore making links instead of
                # copies.
                if mutable:
                    if self.nlinkThreshold == 2:
                        # nlinkThreshold can only be 1 or 2 and it can only be 2 iff the
                        # job store is FilejobStore, and the job store and local temp dir
                        # are on the same device. An atomic rename removes the nlink on the
                        # file handle linked from the job store.
                        shutil.copyfile(localFilePath, localFilePath + '.tmp')
                        os.rename(localFilePath + '.tmp', localFilePath)
                    self._JobState.updateJobSpecificFiles(self, fileStoreID, localFilePath,
                                                          -1, False)
                # If it was immutable
                else:
                    if self.nlinkThreshold == 2:
                        self._accountForNlinkEquals2(localFilePath)
                    self._JobState.updateJobSpecificFiles(self, fileStoreID, localFilePath,
                                                          0.0, False)
    return localFilePath
|
def links(self):
    """Extend the base pagination links with "next" and "prev" entries."""
    result = super(OffsetLimitPaginatedList, self).links
    page = self._page
    # A further page exists when the current window ends before the total count.
    if page.offset + page.limit < self.count:
        result["next"] = Link.for_(self._operation, self._ns, qs=page.next_page.to_items(), **self._context)
    # NOTE(review): this reads self.offset while the "next" check reads
    # self._page.offset -- presumably equivalent; confirm against the class.
    if self.offset > 0:
        result["prev"] = Link.for_(self._operation, self._ns, qs=page.prev_page.to_items(), **self._context)
    return result
|
def useful_mimetype(text):
    """Decide whether *text* names a MIME type that is actually informative
    for deciding how to treat a file (i.e. not a generic/placeholder type)."""
    if text is None:
        return False
    # Normalize first so casing/parameters don't defeat the comparison.
    return normalize_mimetype(text) not in (DEFAULT, PLAIN, None)
|
def upload_from_shared_memory(self, location, bbox, order='F', cutout_bbox=None):
    """Upload from a shared memory array.

    https://github.com/seung-lab/cloud-volume/wiki/Advanced-Topic:-Shared-Memory

    tip: If you want to use slice notation, np.s_[...] will help in a pinch.

    MEMORY LIFECYCLE WARNING: You are responsible for managing the lifecycle of
    the shared memory. CloudVolume will merely read from it, it will not unlink
    the memory automatically. To fully clear the shared memory you must unlink
    the location and close any mmap file handles. You can use
    `cloudvolume.sharedmemory.unlink(...)` to help you unlink the shared memory
    file.

    EXPERT MODE WARNING: If you aren't sure you need this function (e.g. to
    relieve memory pressure or improve performance in some way) you should use
    the ordinary upload method of vol[:] = img. A typical use case is
    transferring arrays between different processes without making copies. For
    reference, this feature was created for uploading a 62 GB array that
    originated in Julia.

    Required:
      location: (str) Shared memory location e.g. 'cloudvolume-shm-RANDOM-STRING'
        This typically corresponds to a file in `/dev/shm` or `/run/shm/`. It can
        also be a file if you're using that for mmap.
      bbox: (Bbox or list of slices) the bounding box the shared array represents.
        For instance if you have a 1024x1024x128 volume and you're uploading only
        a 512x512x64 corner touching the origin, your Bbox would be
        `Bbox((0,0,0), (512,512,64))`.
    Optional:
      cutout_bbox: (bbox or list of slices) If you only want to upload a section
        of the array, give the bbox in volume coordinates (not image coordinates)
        that should be cut out. For example, if you only want to upload
        256x256x32 of the upper rightmost corner of the above example but the
        entire 512x512x64 array is stored in memory, you would provide:
        `Bbox((256, 256, 32), (512, 512, 64))`. By default, upload the entire
        image.

    Returns: void
    """
    def tobbox(x):
        # Accept either a Bbox instance or anything Bbox.from_slices accepts.
        if type(x) == Bbox:
            return x
        return Bbox.from_slices(x)
    bbox = tobbox(bbox)
    # Default: the cutout covers the whole shared array.
    cutout_bbox = tobbox(cutout_bbox) if cutout_bbox else bbox.clone()
    if not bbox.contains_bbox(cutout_bbox):
        raise exceptions.AlignmentError("""
      The provided cutout is not wholly contained in the given array.
      Bbox: {}
      Cutout: {}
    """.format(bbox, cutout_bbox))
    if self.autocrop:
        # Clip the cutout to the volume bounds; nothing to upload if it vanishes.
        cutout_bbox = Bbox.intersection(cutout_bbox, self.bounds)
        if cutout_bbox.subvoxel():
            return
    shape = list(bbox.size3()) + [self.num_channels]
    # Map the shared memory read-only: we never write to the caller's buffer.
    mmap_handle, shared_image = sharedmemory.ndarray(location=location, shape=shape, dtype=self.dtype, order=order, readonly=True)
    # Translate the cutout from volume coordinates into array-local coordinates.
    delta_box = cutout_bbox.clone() - bbox.minpt
    cutout_image = shared_image[delta_box.to_slices()]
    txrx.upload_image(self, cutout_image, cutout_bbox.minpt, parallel=self.parallel, manual_shared_memory_id=location, manual_shared_memory_bbox=bbox, manual_shared_memory_order=order)
    # Close only our own mmap handle; the caller owns (and must unlink) the
    # shared memory itself -- see the lifecycle warning above.
    mmap_handle.close()
|
def iris(display=False):
    """Return the classic iris data in a nice package.

    When *display* is true the labels are the species names; otherwise they
    are the raw integer class targets.
    """
    bunch = sklearn.datasets.load_iris()
    frame = pd.DataFrame(data=bunch.data, columns=bunch.feature_names)
    # pylint: disable=E1101
    if display:
        # Map integer class labels to their human-readable species names.
        return frame, [bunch.target_names[v] for v in bunch.target]
    # pylint: disable=E1101
    return frame, bunch.target
|
def _create_gate_variables(self, input_shape, dtype):
    """Initialize the variables used for the gates.

    Args:
        input_shape: TensorShape of the input batch; must be rank 2
            (batch, features) -- assumed layout, TODO confirm against caller.
        dtype: TensorFlow dtype for the created variables.

    Raises:
        ValueError: if `input_shape` is not rank 2.
    """
    if len(input_shape) != 2:
        raise ValueError("Rank of shape must be {} not: {}".format(2, len(input_shape)))
    # The gates see the concatenation of the hidden state and the input,
    # hence the "equivalent" input size below.
    equiv_input_size = self._hidden_state_size + input_shape.dims[1].value
    initializer = basic.create_linear_initializer(equiv_input_size)
    # One fused weight matrix and bias covering all four gate blocks
    # (hence the 4 * hidden_size output dimension).
    self._w_xh = tf.get_variable(self.W_GATES, shape=[equiv_input_size, 4 * self._hidden_size], dtype=dtype, initializer=self._initializers.get(self.W_GATES, initializer), partitioner=self._partitioners.get(self.W_GATES), regularizer=self._regularizers.get(self.W_GATES))
    self._b = tf.get_variable(self.B_GATES, shape=[4 * self._hidden_size], dtype=dtype, initializer=self._initializers.get(self.B_GATES, initializer), partitioner=self._partitioners.get(self.B_GATES), regularizer=self._regularizers.get(self.B_GATES))
    if self._use_projection:
        # Optional projection mapping the cell output back down to the
        # (possibly smaller) hidden state size.
        w_h_initializer = basic.create_linear_initializer(self._hidden_size)
        self._w_h_projection = tf.get_variable(self.W_H_PROJECTION, shape=[self._hidden_size, self._hidden_state_size], dtype=dtype, initializer=self._initializers.get(self.W_H_PROJECTION, w_h_initializer), partitioner=self._partitioners.get(self.W_H_PROJECTION), regularizer=self._regularizers.get(self.W_H_PROJECTION))
|
def check_request(headers: Headers) -> str:
    """Check a handshake request received from the client.

    If the handshake is valid, this function returns the ``key`` which must be
    passed to :func:`build_response`. Otherwise it raises an
    :exc:`~websockets.exceptions.InvalidHandshake` exception and the server
    must return an error like 400 Bad Request.

    This function doesn't verify that the request is an HTTP/1.1 or higher GET
    request and doesn't perform Host and Origin checks. These controls are
    usually performed earlier in the HTTP request handling code. They're the
    responsibility of the caller.
    """
    # Flatten every Connection header into a single token list.
    connection = []
    for header in headers.get_all("Connection"):
        connection.extend(parse_connection(header))
    if not any(token.lower() == "upgrade" for token in connection):
        raise InvalidUpgrade("Connection", ", ".join(connection))
    upgrade = []
    for header in headers.get_all("Upgrade"):
        upgrade.extend(parse_upgrade(header))
    # For compatibility with non-strict implementations, ignore case when
    # checking the Upgrade header. It's supposed to be 'WebSocket'.
    if len(upgrade) != 1 or upgrade[0].lower() != "websocket":
        raise InvalidUpgrade("Upgrade", ", ".join(upgrade))
    try:
        s_w_key = headers["Sec-WebSocket-Key"]
    except KeyError:
        raise InvalidHeader("Sec-WebSocket-Key")
    except MultipleValuesError:
        raise InvalidHeader("Sec-WebSocket-Key", "more than one Sec-WebSocket-Key header found")
    # The key must be 16 bytes of base64-encoded random data (RFC 6455 §4.2.1).
    try:
        raw_key = base64.b64decode(s_w_key.encode(), validate=True)
    except binascii.Error:
        raise InvalidHeaderValue("Sec-WebSocket-Key", s_w_key)
    if len(raw_key) != 16:
        raise InvalidHeaderValue("Sec-WebSocket-Key", s_w_key)
    try:
        s_w_version = headers["Sec-WebSocket-Version"]
    except KeyError:
        raise InvalidHeader("Sec-WebSocket-Version")
    except MultipleValuesError:
        raise InvalidHeader("Sec-WebSocket-Version", "more than one Sec-WebSocket-Version header found")
    if s_w_version != "13":
        raise InvalidHeaderValue("Sec-WebSocket-Version", s_w_version)
    return s_w_key
|
def fpr(y, z):
    """False positive rate `fp / (fp + tn)`"""
    # Only the negative-class cells of the contingency table are needed.
    _, tn, fp, _ = contingency_table(y, z)
    return fp / (fp + tn)
|
def register_on_extra_data_changed(self, callback):
    """Register *callback* to consume extra-data-changed events.

    The callback receives an IExtraDataChangedEvent object.

    Returns the callback_id.
    """
    return self.event_source.register_callback(
        callback, library.VBoxEventType.on_extra_data_changed
    )
|
def is_stats_query(query):
    """Tell whether *query* is a stats-style query (pipes into a ``select``)
    rather than a plain search.

    :param query: the query string (may be ``None`` or empty)
    :return: ``True`` for a stats query, ``False`` otherwise
    """
    if not query:
        return False
    # Strip every double-quoted string first so that quoted text containing
    # "| ... select" cannot be mistaken for a real pipeline stage.
    stripped = re.sub(r'"[^"]*"', '', query)
    # A stats query has a "|" followed (anywhere, any case) by a bare "select".
    return bool(re.search(r'\|.*\bselect\b', stripped, re.I | re.DOTALL))
|
def copy(self):
    """Return a new instance with the same attributes."""
    cloned_blocks = [block.copy() for block in self.blocks]
    position = tuple(self.pos) if self.pos else None
    return self.__class__(cloned_blocks, position)
|
def main():
    '''main routine'''
    # Positional arguments: resource group, VMSS name, new capacity.
    if len(sys.argv) < 4:
        usage()
    rgname, vmss_name, capacity = sys.argv[1], sys.argv[2], sys.argv[3]
    # Load the Azure application defaults from the local config file.
    try:
        with open('azurermconfig.json') as config_file:
            config_data = json.load(config_file)
    except FileNotFoundError:
        print("Error: Expecting azurermconfig.json in current folder")
        sys.exit()
    tenant_id = config_data['tenantId']
    app_id = config_data['appId']
    app_secret = config_data['appSecret']
    subscription_id = config_data['subscriptionId']
    # Authenticate, then issue the scale request and echo the raw response.
    access_token = azurerm.get_access_token(tenant_id, app_id, app_secret)
    scaleoutput = azurerm.scale_vmss(access_token, subscription_id, rgname, vmss_name, capacity)
    print(scaleoutput.text)
|
def area_fraction_dict(self):
    """Returns:
        (dict): {hkl: area_hkl / total area on wulff}
    """
    total = self.surface_area
    return {hkl: area / total for hkl, area in self.miller_area_dict.items()}
|
def get_within_delta(key, app=None):
    """Get a timedelta object from the application configuration following
    the internal convention of::

        <Amount of Units> <Type of Units>

    Examples of valid config values::

        5 days
        10 minutes

    :param key: The config value key without the 'SECURITY_' prefix
    :param app: Optional application to inspect. Defaults to Flask's
        `current_app`
    """
    parts = config_value(key, app=app).split()
    # parts[0] is the amount, parts[1] the timedelta keyword (e.g. "days").
    return timedelta(**{parts[1]: int(parts[0])})
|
async def handler(event):
    """#learn or #python: Tells the user to learn some Python first."""
    # Delete the triggering message and post the canned reply concurrently.
    actions = [
        event.delete(),
        event.respond(LEARN_PYTHON, reply_to=event.reply_to_msg_id, link_preview=False),
    ]
    await asyncio.wait(actions)
|
def withArgs(self, *args, **kwargs):  # pylint: disable=invalid-name
    """Add a call condition for the stub based on the given argument(s).

    When the stub is later called with matching arguments, a chained
    returns/throws takes effect. For example, calling the stub with argument
    1 returns "#" after:

        stub.withArgs(1).returns("#")

    Without a returns/throws at the end of the chain nothing happens; e.g.
    `stub.withArgs(1)` alone records the condition but has no effect.

    Return:
        a SinonStub object (able to be chained)
    """
    # Empty arg/kwarg sets are stored as None so "no condition" is explicit.
    return _SinonStubCondition(
        copy=self._copy,
        cond_args=args if args else None,
        cond_kwargs=kwargs if kwargs else None,
        oncall=self._oncall,
    )
|
def get_repository_hierarchy_session(self, proxy):
    """Gets the repository hierarchy traversal session.

    arg:    proxy (osid.proxy.Proxy): a proxy
    return: (osid.repository.RepositoryHierarchySession) - a
            RepositoryHierarchySession
    raise:  OperationFailed - unable to complete request
    raise:  Unimplemented - supports_repository_hierarchy() is false
    compliance: optional - This method must be implemented if
                supports_repository_hierarchy() is true.
    """
    # Guard clause: the capability must be advertised before a session is built.
    if not self.supports_repository_hierarchy():
        raise Unimplemented()
    try:
        from . import sessions
    except ImportError:
        raise  # OperationFailed()
    proxy = self._convert_proxy(proxy)
    try:
        return sessions.RepositoryHierarchySession(proxy, runtime=self._runtime)
    except AttributeError:
        raise  # OperationFailed()
|
def fetch_data(blob, start_index, end_index, **options):
    """Fetch data for blob (synchronous wrapper over fetch_data_async).

    Fetches a fragment of a blob up to MAX_BLOB_FETCH_SIZE in length. A
    fragment extending beyond the blob's end returns the data from
    start_index to the end of the blob (smaller than requested); a fragment
    entirely outside the blob returns the empty string.

    Args:
        blob: BlobInfo, BlobKey, str or unicode representation of BlobKey of
            blob to fetch data from.
        start_index: Start index of blob data to fetch. May not be negative.
        end_index: End index (inclusive) of blob data to fetch. Must be
            >= start_index.
        **options: Options for create_rpc().

    Returns:
        str containing partial data of blob. Legal indexes outside the blob's
        boundaries yield the empty string.

    Raises:
        TypeError if start_index or end_index are not indexes, or when blob
            is not a string, BlobKey or BlobInfo.
        DataIndexOutOfRangeError when start_index < 0 or end_index < start_index.
        BlobFetchSizeTooLargeError when the requested fragment is larger than
            MAX_BLOB_FETCH_SIZE.
        BlobNotFoundError when blob does not exist.
    """
    # Kick off the async RPC and block on its result.
    return fetch_data_async(blob, start_index, end_index, **options).get_result()
|
def next_previous(self, photo, options=None, **kwds):
    """Endpoint: /photo/<id>/nextprevious[/<options>].json

    Returns a dict containing the next and previous photo lists (there may
    be more than one next/previous photo returned). The options parameter
    can be used to narrow down the photos, e.g. options={"album": <album_id>}.
    """
    option_string = self._build_option_string(options)
    result = self._client.get(
        "/photo/%s/nextprevious%s.json" % (self._extract_id(photo), option_string),
        **kwds
    )["result"]
    value = {}
    # FIX: the original reused the name "photo" as the inner loop variable,
    # shadowing the parameter; the two identical branches are also merged.
    for key in ("next", "previous"):
        if key not in result:
            continue
        items = result[key]
        # Workaround for APIv1: a single photo may be returned bare
        # instead of wrapped in a list.
        if not isinstance(items, list):  # pragma: no cover
            items = [items]
        value[key] = [Photo(self._client, item) for item in items]
    return value
|
async def async_run_command(self, command, first_try=True):
    """Run a command through a Telnet connection.

    Connect to the Telnet server if not currently connected, otherwise use
    the existing connection. Returns the command output split into decoded
    lines, or an empty list on connection loss / timeout.
    """
    await self.async_connect()
    try:
        with (await self._io_lock):
            # Prefix with the PATH export so the command resolves binaries.
            self._writer.write('{}\n'.format("%s && %s" % (_PATH_EXPORT_COMMAND, command)).encode('ascii'))
            # Drop the echoed command (first line) and the trailing prompt.
            data = ((await asyncio.wait_for(self._reader.readuntil(self._prompt_string), 9)).split(b'\n')[1:-1])
    except (BrokenPipeError, LimitOverrunError):
        if first_try:
            # The connection may have gone stale; reconnect and retry once.
            return await self.async_run_command(command, False)
        _LOGGER.warning("connection is lost to host.")
        return []
    except asyncio.TimeoutError:
        # FIX: asyncio.wait_for raises asyncio.TimeoutError, which is NOT the
        # builtin TimeoutError before Python 3.11, so the original
        # "except TimeoutError" never fired there. asyncio.TimeoutError is an
        # alias of the builtin on 3.11+, so this is correct on all versions.
        _LOGGER.error("Host timeout.")
        return []
    finally:
        # One-shot connection: always close the writer.
        self._writer.close()
    return [line.decode('utf-8') for line in data]
|
def climatology(self, startclim, endclim, **kwargs):
    r"""Return a climatology of observations for a specified place and period.

    Users must specify at least one geographic search parameter ('stid',
    'state', 'country', 'county', 'radius', 'bbox', 'cwa', 'nwsfirezone',
    'gacc', or 'subgacc') to obtain observation data. Other parameters may
    also be included. See the metadata() function for station IDs.

    Arguments:
        startclim: string, mandatory
            Start date in form of MMDDhhmm. MUST BE USED WITH THE ENDCLIM
            PARAMETER. Default time is UTC. e.g. startclim='06011800'.
            Do not specify a year.
        endclim: string, mandatory
            End date in form of MMDDhhmm. MUST BE USED WITH THE STARTCLIM
            PARAMETER. Default time is UTC. e.g. endclim='06011800'.
            Do not specify a year.
        obtimezone: string, optional
            Either UTC or local; sets the timezone of obs. Default is UTC.
            e.g. obtimezone='local'
        showemptystations: string, optional
            Set to '1' to show stations even if no obs exist that match the
            time period. Stations without obs are omitted by default.
        stid: string, optional
            Single or comma separated list of MesoWest station IDs.
            e.g. stid='kden,kslc,wbb'
        county: string, optional
            County/parish/borough (US/Canada only), full name
            e.g. county='Larimer'
        state: string, optional
            US state, 2-letter ID e.g. state='CO'
        country: string, optional
            Single or comma separated list of abbreviated 2 or 3 character
            countries e.g. country='us,ca,mx'
        radius: string, optional
            Distance from a lat/lon pt or stid as [lat,lon,radius (mi)] or
            [stid,radius (mi)]. e.g. radius="-120,40,20"
        bbox: string, optional
            Stations within a [lon/lat] box in the order
            [lonmin,latmin,lonmax,latmax] e.g. bbox="-120,40,-119,41"
        cwa: string, optional
            NWS county warning area, e.g. cwa='LOX'. See
            http://www.nws.noaa.gov/organization.php for the CWA list.
        nwsfirezone: string, optional
            NWS fire zones, e.g. nwsfirezone='LOX241'. See
            http://www.nws.noaa.gov/geodata/catalog/wsom/html/firezone.htm
            for a shapefile containing the full list of zones.
        gacc: string, optional
            Name of Geographic Area Coordination Center e.g. gacc='EBCC'.
            See http://gacc.nifc.gov/ for a list of GACCs.
        subgacc: string, optional
            Name of Sub GACC e.g. subgacc='EB07'
        vars: string, optional
            Single or comma separated list of sensor variables. Returns all
            stations matching any provided variable. Do not request vars
            twice in the query. e.g. vars='wind_speed,pressure'. Use the
            variables function to see a list of sensor vars.
        status: string, optional
            'active' or 'inactive' filters stations by archive status;
            omitting returns all stations. e.g. status='active'
        units: string, optional
            String or set of strings and pipes separated by commas. Default
            is metric units. Set units='ENGLISH' for FREEDOM UNITS ;)
            Valid other combinations: temp|C, temp|F, temp|K; speed|mps,
            speed|mph, speed|kph, speed|kts; pres|pa, pres|mb; height|m,
            height|ft; precip|mm, precip|cm, precip|in; alti|pa, alti|inhg.
            e.g. units='temp|F,speed|kph,metric'
        groupby: string, optional
            Group results by: state, county, country, cwa, nwszone,
            mwsfirezone, gacc, subgacc. e.g. groupby='state'
        timeformat: string, optional
            A python format string for customized date-time groups in
            observation times. e.g. timeformat='%m/%d/%Y at %H:%M'

    Returns:
        Dictionary of climatology observations through the get_response()
        function.

    Raises:
        None.
    """
    # Validate that at least one geographic selector was supplied, then
    # fold the mandatory parameters and the API token into the query.
    self._check_geo_param(kwargs)
    kwargs.update(startclim=startclim, endclim=endclim, token=self.token)
    return self._get_response('stations/climatology', kwargs)
|
def get_issue(issue_number, repo_name=None, profile='github', output='min'):
    '''Return information about a single issue in a named repository.

    .. versionadded:: 2016.11.0

    issue_number
        The number of the issue to retrieve.

    repo_name
        The name of the repository from which to get the issue. This argument
        is required, either passed via the CLI, or defined in the configured
        profile. A ``repo_name`` passed as a CLI argument will override the
        repo_name defined in the configured profile, if provided.

    profile
        The name of the profile configuration to use. Defaults to ``github``.

    output
        The amount of data returned by each issue. Defaults to ``min``. Change
        to ``full`` to see all issue output.

    CLI Example:

    .. code-block:: bash

        salt myminion github.get_issue 514
        salt myminion github.get_issue 514 repo_name=salt
    '''
    org_name = _get_config_value(profile, 'org_name')
    # CLI-supplied repo_name wins; fall back to the profile's configured one.
    if repo_name is None:
        repo_name = _get_config_value(profile, 'repo_name')
    issue_data = _query(
        profile,
        action='/'.join(['repos', org_name, repo_name]),
        command='issues/' + six.text_type(issue_number),
    )
    issue_id = issue_data.get('id')
    if output == 'full':
        return {issue_id: issue_data}
    return {issue_id: _format_issue(issue_data)}
|
def to_headers(self, span_context):
    """Convert a SpanContext object to W3C Distributed Tracing headers,
    using version 0.

    :type span_context:
        :class:`~opencensus.trace.span_context.SpanContext`
    :param span_context: SpanContext object.

    :rtype: dict
    :returns: W3C Distributed Tracing headers.
    """
    # The trace-flags field: '01' when sampling is enabled, '00' otherwise.
    flags = '01' if span_context.trace_options.enabled else '00'
    headers = {
        _TRACEPARENT_HEADER_NAME: '00-{}-{}-{}'.format(
            span_context.trace_id, span_context.span_id, flags
        ),
    }
    if span_context.tracestate:
        headers[_TRACESTATE_HEADER_NAME] = TracestateStringFormatter().to_string(span_context.tracestate)
    return headers
|
def cytherize(args, file):
    """Used by core to integrate all the pieces of information, and to
    interface with the user. Compiles and cleans up.

    Args:
        args: parsed option mapping (watch/timestamp/execute/... flags plus
            the mutable 'watch_stats' counters).
        file: per-file state mapping ('file_path', error timestamps, ...).
    """
    # Decide whether to (re)compile: an out-of-date output is compiled only
    # once the source has been fixed since the last error.
    if isOutDated(file):
        if isUpdated(file):
            response = initiateCompilation(args, file)
        else:
            response = {'returncode': WAIT_FOR_FIX, 'output': ''}
    else:
        if args['timestamp']:
            response = {'returncode': SKIPPED_COMPILATION, 'output': ''}
        else:
            response = initiateCompilation(args, file)
        time.sleep(INTERVAL)
    # Translate the compile result into a user-facing message.
    if response['returncode'] == ERROR_PASSOFF:
        file['stamp_if_error'] = time.time()
        if args['watch']:
            # FIX: the adjacent string literals below were missing joining
            # spaces ("it isfixed", "beforeit tries") in the original.
            if len(args['filenames']) > 1:
                output = "Error in file: '{}'; Cyther will wait until it is " "fixed...\n".format(file['file_path'])
            else:
                output = "Cyther will wait for you to fix this error before " "it tries to compile again...\n"
        else:
            output = "Error in source file, see above\n"
    elif response['returncode'] == SKIPPED_COMPILATION:
        if not args['watch']:
            # FIX: missing space between "since" and "last compile".
            output = 'Skipping compilation: source file not updated since ' 'last compile\n'
        else:
            output = ''
    elif response['returncode'] == WAIT_FOR_FIX:
        output = ''
    elif response['returncode'] == FINE:
        if args['watch']:
            if len(args['filenames']) > 1:
                output = "Compiled the file '{}'\n".format(file['file_path'])
            else:
                output = 'Compiled the file\n'
        else:
            if not args['concise']:
                output = 'Compilation complete\n'
            else:
                output = ''
    else:
        raise CytherError("Unrecognized return value '{}'" "".format(response['returncode']))
    response['output'] += output
    # Optionally run/time the produced binary when compilation succeeded
    # (or when compilation was skipped outside watch mode).
    condition = response['returncode'] == SKIPPED_COMPILATION and not args['watch']
    if (args['execute'] or args['timer']) and response['returncode'] == FINE or condition:
        ret = cueExtractAndRun(args, file)
        response['output'] += ret['output']
    # Maintain and report the watch-mode statistics.
    if args['watch']:
        if response['returncode'] == FINE or response['returncode'] == ERROR_PASSOFF:
            if response['returncode'] == FINE:
                args['watch_stats']['compiles'] += 1
            else:
                args['watch_stats']['errors'] += 1
            args['watch_stats']['counter'] += 1
            response['output'] += WATCH_STATS_TEMPLATE.format(args['watch_stats']['counter'], args['watch_stats']['compiles'], args['watch_stats']['errors'], args['watch_stats']['polls'])
        else:
            args['watch_stats']['polls'] += 1
    # Emit the accumulated output; in non-watch mode an error code may raise.
    if args['watch']:
        if response['returncode'] == 1:
            print(response['output'] + '\n')
        else:
            if response['output']:
                print(response['output'])
    else:
        if response['returncode'] == 1:
            if args['error']:
                raise CytherError(response['output'])
            else:
                print(response['output'])
        else:
            print(response['output'])
|
def _index_counter_keys(self, counter, unknown_token, reserved_tokens, most_freq_count, min_freq):
    """Indexes keys of `counter`.

    Adds tokens from `counter` to the vocabulary, most frequent first,
    honoring the `most_freq_count` cap and the `min_freq` threshold while
    skipping the unknown and reserved tokens (already indexed).
    """
    assert isinstance(counter, collections.Counter), '`counter` must be an instance of collections.Counter.'
    special_tokens = set(reserved_tokens) if reserved_tokens is not None else set()
    special_tokens.add(unknown_token)
    # Sort by token first so frequency ties break deterministically, then by
    # frequency descending -- the second sort is stable.
    ordered = sorted(counter.items(), key=lambda item: item[0])
    ordered.sort(key=lambda item: item[1], reverse=True)
    limit = len(special_tokens) + (len(counter) if most_freq_count is None else most_freq_count)
    for token, freq in ordered:
        if freq < min_freq or len(self._idx_to_token) == limit:
            break
        if token in special_tokens:
            continue
        self._idx_to_token.append(token)
        self._token_to_idx[token] = len(self._idx_to_token) - 1
|
def insert(self, ns, docid, raw, **kw):
    """Perform a single insert operation replayed from the oplog.

    Duplicate-key errors are logged and swallowed so that replaying an
    already-applied oplog entry is idempotent.

    Example `raw` payload:
        {'docid': ObjectId('4e95ae77a20e6164850761cd'),
         'ns': u'mydb.tweets',
         'raw': {u'h': -1469300750073380169L,
                 u'ns': u'mydb.tweets',
                 u'o': {u'_id': ObjectId('4e95ae77a20e6164850761cd'),
                        u'content': u'Lorem ipsum',
                        u'nr': 16},
                 u'op': u'i',
                 u'ts': Timestamp(1318432375, 1)}}
    """
    try:
        self._dest_coll(ns).insert(raw['o'], safe=True)
    except DuplicateKeyError as e:
        # FIX: modernized from the Python-2-only "except X, e" syntax;
        # "as e" works on Python 2.6+ and is required on Python 3.
        logging.warning(e)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.