signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
|---|---|
def describe_splits_ex(self, cfName, start_token, end_token, keys_per_split):
    """Request token-range splits from the server (asynchronous).

    Registers a new Deferred under a fresh sequence id, sends the
    ``describe_splits_ex`` request, and returns the Deferred which
    fires when the response arrives.

    :param cfName: column family name
    :param start_token: start of the token range
    :param end_token: end of the token range
    :param keys_per_split: target number of keys per split
    :returns: twisted ``Deferred`` fired with the server response
    """
    self._seqid += 1
    deferred = self._reqs[self._seqid] = defer.Deferred()
    self.send_describe_splits_ex(cfName, start_token, end_token, keys_per_split)
    return deferred
|
async def pixy_init(self, max_blocks=5, cb=None, cb_type=None):
    """Initialize the Pixy camera and enable Pixy block reporting.

    This is a FirmataPlusRB feature.

    :param max_blocks: maximum number of Pixy blocks to report when
        many signatures are found (clamped to 7 bits)
    :param cb: callback function to report Pixy blocks
    :param cb_type: Constants.CB_TYPE_DIRECT for a direct call or
        Constants.CB_TYPE_ASYNCIO for an asyncio coroutine
    :returns: No return value.
    """
    # Pixy uses SPI; pin 11 is MOSI, so the callback lives on that pin.
    if cb:
        self.digital_pins[PrivateConstants.PIN_PIXY_MOSI].cb = cb
    if cb_type:
        self.digital_pins[PrivateConstants.PIN_PIXY_MOSI].cb_type = cb_type
    # max_blocks is masked to 7 bits to fit a single sysex data byte
    payload = [PrivateConstants.PIXY_INIT, max_blocks & 0x7f]
    await self._send_sysex(PrivateConstants.PIXY_CONFIG, payload)
|
def get_async_pillar(opts, grains, minion_id, saltenv=None, ext=None, funcs=None,
                     pillar_override=None, pillarenv=None, extra_minion_data=None):
    """Return the async pillar driver selected by the ``file_client`` option."""
    client = opts['file_client']
    # A disabled master cannot serve remote pillar data; fall back to local.
    if client == 'remote' and opts.get('master_type') == 'disable':
        client = 'local'
    if client == 'remote':
        driver = AsyncRemotePillar
    else:
        # 'local' and any unknown value both use the local driver
        driver = AsyncPillar
    return driver(opts, grains, minion_id, saltenv, ext, functions=funcs,
                  pillar_override=pillar_override, pillarenv=pillarenv,
                  extra_minion_data=extra_minion_data)
|
def update(self, data=None, **kwargs):
    """Write changes to this record immediately.

    :param data: dictionary of field changes (mutated in place when
        keyword args are also supplied, matching historical behavior)
    :param kwargs: additional field changes as keyword args
    :returns: result of the model's ``write`` call
    """
    changes = {} if data is None else data
    changes.update(kwargs)
    return self.model.write([self.id], changes)
|
def create_double(self, value: float) -> Double:
    """Append a new :class:`ConstantDouble` to the pool and return it.

    Doubles occupy two constant-pool slots (tag 6 plus a padding
    entry), so a placeholder is appended after the real entry.

    :param value: The value of the new Double.
    """
    self.append((6, value))
    # padding slot mandated for 8-byte constants in the class-file format
    self.append(None)
    return self.get(self.raw_count - 2)
|
def transform(self, X, y=None):
    """Apply :meth:`_transform` to every sample, in parallel.

    :param X: list of dicts which contain metabolic measurements
    :param y: ignored; present for scikit-learn API compatibility
    :returns: list of transformed samples
    """
    worker = delayed(self._transform)
    return Parallel(n_jobs=self.n_jobs)(worker(sample) for sample in X)
|
def load_wavefront(file_obj, resolver=None, **kwargs):
    """Load an ASCII Wavefront OBJ file_obj into kwargs for the
    Trimesh constructor.

    Vertices with the same position but different normals or uvs
    are split into multiple vertices.  Colors are discarded.

    Parameters
    ------------
    file_obj : file object
      Containing a wavefront file
    resolver : trimesh.visual.Resolver or None
      For loading referenced files, like MTL or textures
    kwargs : **
      Passed to trimesh.Trimesh.__init__

    Returns
    ------------
    meshes : list of dict
      Each entry is kwargs for the Trimesh constructor
    """
    # make sure text is UTF-8 with only \n newlines
    text = file_obj.read()
    if hasattr(text, 'decode'):
        text = text.decode('utf-8')
    text = text.replace('\r\n', '\n').replace('\r', '\n') + ' \n'

    meshes = []

    def append_mesh():
        # append kwargs for a Trimesh constructor to our list of meshes
        if len(current['f']) > 0:
            # get vertices as a clean numpy array
            vertices = np.array(current['v'], dtype=np.float64).reshape((-1, 3))
            # do the same for faces
            faces = np.array(current['f'], dtype=np.int64).reshape((-1, 3))
            # get keys and values of remap as numpy arrays; we try to
            # preserve the original order as much as possible by sorting
            # by remap key
            keys, values = (np.array(list(remap.keys())),
                            np.array(list(remap.values())))
            try:
                # if we sort keys as strings we get orderings like
                # ('1/1/1', '10/10/10') vs ('1/1/1', '2/2/2'), so try to
                # convert the leading vertex index to int before sorting.
                # BUGFIX: dtype was np.int, which was removed in NumPy 1.24;
                # the builtin int is the documented replacement.
                split = np.array([i.split('/')[0] for i in keys], dtype=np.int64)
                order = split.argsort()
            except BaseException:
                # fall back to arbitrary (string) ordering
                order = keys.argsort()
            # new order of vertices
            vert_order = values[order]
            # we need a mask to preserve the index relationship
            # between faces and vertices
            face_order = np.zeros(len(vertices), dtype=np.int64)
            face_order[vert_order] = np.arange(len(vertices), dtype=np.int64)
            # apply the ordering and put into the kwarg dict
            loaded = {'vertices': vertices[vert_order],
                      'faces': face_order[faces],
                      'metadata': {'object_name': object_name}}
            # handle vertex normals
            if len(current['vn']) > 0:
                normals = np.array(current['vn'],
                                   dtype=np.float64).reshape((-1, 3))
                loaded['vertex_normals'] = normals[vert_order]
            # build face-group information; faces didn't move around
            # so we don't have to reindex
            if len(current['g']) > 0:
                face_groups = np.zeros(len(current['f']) // 3, dtype=np.int64)
                for idx, start_f in current['g']:
                    face_groups[start_f:] = idx
                loaded['metadata']['face_groups'] = face_groups
            if len(current['usemtl']) > 0 and any(current['vt_ok']):
                texture = np.full((len(current['vt_ok']), 3),
                                  np.nan, dtype=np.float64)
                # make sure the mask is a numpy array.
                # BUGFIX: dtype was np.bool, which was removed in NumPy 1.24;
                # the builtin bool is the documented replacement.
                vt_ok = np.asanyarray(current['vt_ok'], dtype=bool)
                texture[vt_ok] = current['vt']
                for usemtl in current['usemtl']:
                    try:
                        findices = usemtl_to_findices[usemtl]
                        uv = texture[findices]
                        # the file name of the texture image
                        file_name = mtllibs[usemtl]['map_Kd']
                        # get the data as bytes
                        file_data = resolver.get(file_name)
                        # load the bytes into a PIL image
                        image = PIL.Image.open(util.wrap_as_stream(file_data))
                        # create a texture object
                        loaded['visual'] = visual.texture.TextureVisuals(
                            uv=uv, image=image)
                    except BaseException:
                        log.error('failed to load texture: {}'.format(usemtl),
                                  exc_info=True)
            # apply the vertex order to the visual object
            if 'visual' in loaded:
                try:
                    loaded['visual'].update_vertices(vert_order)
                except BaseException:
                    log.error('failed to update vertices', exc_info=True)
                    loaded.pop('visual')
            # this mesh is done so append the loaded mesh kwarg dict
            meshes.append(loaded)

    attribs = {k: [] for k in ['v', 'vt', 'vn']}
    current = {k: [] for k in
               ['v', 'vt', 'vn', 'f', 'g', 'usemtl', 'vt_ok', 'vn_ok']}
    # map a usemtl name -> 'f' indices
    usemtl_to_findices = collections.defaultdict(list)
    mtllibs = {}
    # remap vertex indexes {str key: int index}
    remap = {}
    next_idx = 0
    group_idx = 0
    object_name = ''
    for line in text.split("\n"):
        line_split = line.strip().split()
        if len(line_split) < 2:
            continue
        if line_split[0] in attribs:
            # v, vt, or vn: vertex, vertex texture, or vertex normal;
            # only parse 3 values and ignore vertex colors
            value = [float(x) for x in line_split[1:4]]
            # vt: u [v] [w] -- v and w are optional and default to 0
            if line_split[0] == 'vt' and len(value) != 3:
                for _ in range(3 - len(value)):
                    value.append(0)
            attribs[line_split[0]].append(value)
        elif line_split[0] == 'f':
            # a face
            ft = line_split[1:]
            if len(ft) == 4:
                # hasty triangulation of a quad
                ft = [ft[0], ft[1], ft[2], ft[2], ft[3], ft[0]]
            for f in ft:
                # loop through each vertex reference of a face;
                # we are reshaping later into (n, 3)
                if f not in remap:
                    remap[f] = next_idx
                    next_idx += 1
                    # faces are "vertex index"/"vertex texture"/"vertex normal";
                    # a value may be left blank, which .split handles by
                    # nicely maintaining the index
                    f_split = f.split('/')
                    current['v'].append(attribs['v'][int(f_split[0]) - 1])
                    if len(f_split) > 1 and f_split[1] != '':
                        current['vt'].append(
                            attribs['vt'][int(f_split[1]) - 1])
                        current['vt_ok'].append(True)
                    else:
                        current['vt_ok'].append(False)
                    if len(f_split) > 2:
                        current['vn'].append(
                            attribs['vn'][int(f_split[2]) - 1])
                        current['vn_ok'].append(True)
                    else:
                        current['vn_ok'].append(False)
                    if len(current['usemtl']) > 0:
                        usemtl_to_findices[current['usemtl'][-1]].append(
                            len(current['vt']) - 1)
                current['f'].append(remap[f])
        elif line_split[0] == 'o':
            # defining a new object: finalize the previous one
            append_mesh()
            # reset current to empty lists
            current = {k: [] for k in current.keys()}
            usemtl_to_findices = collections.defaultdict(list)
            remap = {}
            next_idx = 0
            group_idx = 0
            object_name = line_split[1]
        elif line_split[0] == 'g':
            # defining a new face group
            group_idx += 1
            current['g'].append((group_idx, len(current['f']) // 3))
        elif line_split[0] == 'mtllib':
            # the name of the referenced material file
            mtl_name = line_split[1]
            try:
                # fetch bytes containing MTL data
                mtl_data = resolver.get(mtl_name)
                # load into a list of dicts and save the new materials
                for mtllib in parse_mtl(mtl_data):
                    mtllibs[mtllib['newmtl']] = mtllib
            except BaseException:
                log.error('unable to load material: {}'.format(mtl_name),
                          exc_info=True)
                continue
        elif line_split[0] == 'usemtl':
            current['usemtl'].append(line_split[1])
    # finalize the trailing object, if any faces were collected
    if next_idx > 0:
        append_mesh()
    return meshes
|
def set_column_count(self, count):
    """Set the table column count.

    Grows every row by appending new (editable) items, or shrinks every
    row by removing trailing children, then stores the new count.

    Args:
        count (int): desired number of columns
    """
    # NOTE: the original also computed row_count() and never used it;
    # that dead call has been removed.
    current_column_count = self.column_count()
    if count > current_column_count:
        item_cls = TableEditableItem if self._editable else TableItem
        for row_key in self.children.keys():
            row = self.children[row_key]
            for col in range(current_column_count, count):
                row.append(item_cls(), str(col))
                if self._editable:
                    # wire edits back with the (row, column) coordinates
                    row.children[str(col)].onchange.connect(
                        self.on_item_changed, int(row_key), int(col))
        self._update_first_row()
    elif count < current_column_count:
        for row in self.children.values():
            for col in range(count, current_column_count):
                row.remove_child(row.children[str(col)])
    self._column_count = count
|
def track_class(self, cls, name=None, resolution_level=0, keep=False, trace=False):
    """Track all future instances of the class `cls`.

    Objects of that type that already exist are *not* tracked.  If
    `track_class` is called for a class already tracked, the tracking
    parameters are modified.  Instantiation traces can be generated by
    setting `trace` to True.

    A constructor is injected to begin instance tracking on creation of
    the object; the injected constructor calls `track_object` internally.

    :param cls: class to be tracked; may be an old-style or new-style class
    :param name: reference the class by a name; default is the
        concatenation of module and class name
    :param resolution_level: recursion depth up to which referents are
        sized individually (0 = opaque, 1 = direct referents, ...)
    :param keep: prevent the object's deletion by keeping a (strong)
        reference to the object
    :param trace: save an instantiation stack trace for each instance
    :raises TypeError: if `cls` is not a class object
    """
    if not isclass(cls):
        raise TypeError("only class objects can be tracked")
    if name is None:
        name = '%s.%s' % (cls.__module__, cls.__name__)
    if self._is_tracked(cls):
        self._track_modify(cls, name, resolution_level, keep, trace)
    else:
        self._inject_constructor(cls, self._tracker, name,
                                 resolution_level, keep, trace)
|
def return_rri(self, begsam, endsam):
    """Return raw, irregularly-timed RRI samples.

    Skips the 12-line header of the file, then collects the first
    tab-delimited field of each data line whose index falls in
    [begsam, endsam).

    :param begsam: first sample index (inclusive)
    :param endsam: last sample index (exclusive)
    :returns: 1-D array of RRI values
    """
    n_samples = endsam - begsam
    dat = empty(n_samples)
    filled = 0
    with open(self.filename, 'rt') as f:
        # skip the 12 header lines
        for _ in range(12):
            next(f)
        for j, line in enumerate(f):
            if begsam <= j < endsam:
                # value is everything before the first tab
                dat[filled] = float64(line[:line.index('\t')])
                filled += 1
                if filled == n_samples:
                    break
    return dat
|
def open(self, pchPath, mode, unElementSize, unElements):
    """Open an existing IOBuffer, or create a new one of the given size.

    :returns: tuple of (API result code, handle to the IOBuffer)
    """
    handle = IOBufferHandle_t()
    result = self.function_table.open(
        pchPath, mode, unElementSize, unElements, byref(handle))
    return result, handle
|
def versions(self):
    """Property for accessing the :class:`VersionManager` instance,
    which is used to get server info.

    The manager is created lazily on first access and cached.

    :rtype: yagocd.resources.version.VersionManager
    """
    if self._version_manager is None:
        self._version_manager = VersionManager(session=self._session)
    return self._version_manager
|
def BSearchRound(a, x, lo=0, hi=None):
    """Return the index of the element of `a` closest to `x`.

    Arguments:
    a -- ordered numeric sequence
    x -- element to search for within a
    lo -- lowest index to consider in the search
    hi -- highest index to consider in the search

    lo/hi mirror bisect.bisect_left so that capability is not lost.
    Returns -1 for an empty sequence.
    """
    if not a:
        return -1
    if hi is None:
        hi = len(a)
    pos = bisect_left(a, x, lo, hi)
    if pos >= hi:
        # x is beyond the considered range: the last element is closest
        return hi - 1
    if a[pos] == x or pos == lo:
        return pos
    # x falls between a[pos-1] and a[pos]: pick the nearer neighbour,
    # preferring the lower one on ties
    return pos if x - a[pos - 1] > a[pos] - x else pos - 1
|
def read(self, file_path=None):
    """Read and return the contents of a file.

    :param file_path: (str) path to a file in the local file system;
        defaults to ``self.file_path`` when omitted
    :return: (str) contents of the file, or (False) if not found / not a file
    """
    path = file_path if file_path else self.file_path
    # abort if the file path does not exist
    if not os.path.exists(path):
        self.oops("Sorry, but {} does not exist".format(path))
        return False
    # abort if the file path is not a regular file
    if not os.path.isfile(path):
        self.oops("Sorry, but {} is not a file".format(path))
        return False
    with open(path) as handle:
        return handle.read()
|
def setCurrentPage(self, pageno):
    """Set the current page for this widget to the given page.

    Clamps the page to at least 1, updates the prev/next buttons and
    the page spinner (without re-triggering its signals), and emits
    ``currentPageChanged`` unless signals are blocked.

    :param pageno: | <int>
    """
    if pageno == self._currentPage:
        return
    pageno = max(pageno, 1)
    self._currentPage = pageno
    self._prevButton.setEnabled(pageno > 1)
    self._nextButton.setEnabled(pageno < self.pageCount())
    # keep the spinner in sync without emitting a change signal
    self._pagesSpinner.blockSignals(True)
    self._pagesSpinner.setValue(pageno)
    self._pagesSpinner.blockSignals(False)
    if not self.signalsBlocked():
        self.currentPageChanged.emit(pageno)
|
def cli(env, identifier):
    """Retrieve credentials used for generating an AWS signature. Max of 2."""
    manager = SoftLayer.ObjectStorageManager(env.client)
    table = formatting.Table(['id', 'password', 'username', 'type_name'])
    for cred in manager.list_credential(identifier):
        table.add_row([cred['id'], cred['password'],
                       cred['username'], cred['type']['name']])
    env.fout(table)
|
def getL4PredictedActiveCells(self):
    """Return the predicted active cells of each L4 column.

    :returns: list with one index array per column, holding the indices
        of the cells reported by the column's "predictedActiveCells"
        output
    """
    predictedActive = []
    # BUGFIX: `xrange` is Python-2-only; `range` works on both versions.
    for i in range(self.numColumns):
        region = self.network.regions["L4Column_" + str(i)]
        predictedActive.append(
            region.getOutputData("predictedActiveCells").nonzero()[0])
    return predictedActive
|
def _se_all ( self ) :
"""Standard errors ( SE ) for all parameters , including the intercept ."""
|
x = np . atleast_2d ( self . x )
err = np . atleast_1d ( self . ms_err )
se = np . sqrt ( np . diagonal ( np . linalg . inv ( x . T @ x ) ) * err [ : , None ] )
return np . squeeze ( se )
|
def snmp_server_engineID_drop_engineID_local(self, **kwargs):
    """Build the config tree for snmp-server/engineID-drop/engineID/local.

    Consumes 'local' (the leaf text) and, optionally, 'callback' from
    kwargs, then hands the assembled XML element to the callback.
    (Auto-generated code.)
    """
    config = ET.Element("config")
    node = ET.SubElement(config, "snmp-server",
                         xmlns="urn:brocade.com:mgmt:brocade-snmp")
    for tag in ("engineID-drop", "engineID"):
        node = ET.SubElement(node, tag)
    leaf = ET.SubElement(node, "local")
    leaf.text = kwargs.pop('local')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
|
def guess_decode_from_terminal(text, term):
    """Decode *text* coming from terminal *term*.

    First try the terminal encoding, if given; on failure (or when the
    terminal has no encoding) delegate to :func:`guess_decode`, which
    falls back through UTF-8, the preferred locale encoding, and
    finally latin-1, which always works.

    :returns: tuple of (decoded text, encoding used)
    """
    encoding = getattr(term, 'encoding', None)
    if encoding:
        try:
            return text.decode(encoding), encoding
        except UnicodeDecodeError:
            pass
    return guess_decode(text)
|
def _decode_bytes ( self , bytestring ) :
"""Internal method used to convert the utf - 8 encoded bytestring into unicode .
If the conversion fails , the socket will be closed ."""
|
if not bytestring :
return u''
try :
return bytestring . decode ( 'utf-8' )
except UnicodeDecodeError :
self . close ( 1007 )
raise
|
def plain(self):
    """Get a string representation of this XML document.

    @return: A I{plain} string: the declaration, followed by the root
        element's plain form when a root exists.
    @rtype: basestring
    """
    root = self.root()
    if root is None:
        return self.DECL
    return self.DECL + root.plain()
|
def unmount_volume_groups(self):
    """Deactivate all volume groups identified by
    :func:`find_volume_groups` and detach their loopback devices."""
    for vg_name, pv_name in self.find_volume_groups():
        # deactivate the volume group first ...
        _util.check_output_(['lvchange', '-a', 'n', vg_name])
        # ... then detach the backing loop device
        _util.check_output_(['losetup', '-d', pv_name])
|
def exportTiles(self, levels, exportBy="LevelID", tilePackage=False,
                exportExtent="DEFAULT", optimizeTilesForSize=True,
                compressionQuality=0, areaOfInterest=None,
                asynchronous=False):
    """Export cache tiles from a map service for offline use.

    Submits an asynchronous exportTiles job to a Map Service that
    allows clients to export cache tiles.  The job produces either a
    tile package (.tpk) or a cache raster dataset.

    Inputs:
      levels - tiled service levels to export; comma-separated values
          or a range, matching the criteria in exportBy
          (e.g. "1,2,3" or "1-4,7-9").
      exportBy - criteria used to select the tile service levels:
          LevelID | Resolution | Scale.
      tilePackage - True to produce a tile package, False (default)
          for a cache raster data set.
      exportExtent - bounding box of the export as
          "<xmin>,<ymin>,<xmax>,<ymax>" or a JSON envelope; defaults
          to the full extent of the tiled map service.
      optimizeTilesForSize - (Optional) enable compression of JPEG
          tiles to reduce the size of the download.
      compressionQuality - (Optional) compression factor 0-100 used
          when optimizeTilesForSize is True; cannot exceed the tile's
          original compression.
      areaOfInterest - (Optional) Polygon restricting the export;
          supersedes exportExtent.
      asynchronous - when True return the raw job submission response
          immediately; when False (default) poll the job and return the
          downloaded files (tilePackage=True) or the result folders.
          NOTE: this parameter was previously named `async`, which is a
          reserved word and a syntax error on Python 3.7+.

    Returns: job response dict (asynchronous=True), list of downloaded
    files, list of result folders, or None on job failure.
    """
    params = {
        "f": "json",
        "tilePackage": tilePackage,
        "exportExtent": exportExtent,
        "optimizeTilesForSize": optimizeTilesForSize,
        "compressionQuality": compressionQuality,
        "exportBy": exportBy,
        "levels": levels,
    }
    url = self._url + "/exportTiles"
    # short-circuit on None so the Polygon check is only made when needed
    if areaOfInterest is not None and isinstance(areaOfInterest, Polygon):
        params["areaOfInterest"] = {"features": [areaOfInterest.asDictionary()]}
    if asynchronous:
        # hand the job submission response straight back to the caller
        return self._get(url=url, param_dict=params,
                         proxy_url=self._proxy_url,
                         proxy_port=self._proxy_port)
    # submit the job, then poll until it succeeds or terminally fails
    exportJob = self._get(url=url, param_dict=params,
                          securityHandler=self._securityHandler,
                          proxy_url=self._proxy_url,
                          proxy_port=self._proxy_port)
    jobUrl = "%s/jobs/%s" % (url, exportJob['jobId'])
    gpJob = GPJob(url=jobUrl, securityHandler=self._securityHandler,
                  proxy_port=self._proxy_port, proxy_url=self._proxy_url)
    status = gpJob.jobStatus
    while status != "esriJobSucceeded":
        if status in ['esriJobFailed', 'esriJobCancelling', 'esriJobCancelled']:
            return None
        time.sleep(5)
        status = gpJob.jobStatus
    # only the first result entry is inspected (historical behavior)
    for key, value in gpJob.results.items():
        if key != "out_service_url":
            return None
        params = {"f": "json"}
        gpRes = self._get(url=value['value'], param_dict=params,
                          securityHandler=self._securityHandler,
                          proxy_url=self._proxy_url,
                          proxy_port=self._proxy_port)
        if not tilePackage:
            return gpRes['folders']
        files = []
        for f in gpRes['files']:
            files.append(self._get(url=f['url'],
                                   out_folder=tempfile.gettempdir(),
                                   file_name=f['name'],
                                   param_dict=params,
                                   securityHandler=self._securityHandler,
                                   proxy_url=self._proxy_url,
                                   proxy_port=self._proxy_port))
        return files
|
def _recv(self):
    """Implementation of the receive thread.  Waits for data to
    arrive on the socket, then passes the data through the defined
    receive framer and sends it on to the application.
    """
    # Outer loop: receive some data
    while True:
        # Wait until we can go: release our hold on the receive lock,
        # yield so other greenlets can run, then reacquire before
        # touching the socket.
        self._recv_lock.release()
        gevent.sleep()
        # Yield to another thread
        self._recv_lock.acquire()
        recv_buf = self._sock.recv(self.recv_bufsize)
        # If it's empty, the peer closed the other end
        if not recv_buf:
            # Manually kill the send thread; do this manually
            # instead of calling close() because close() will kill
            # us, and since close() would be running in our thread
            # context, it would never get around to killing the
            # send thread
            if self._send_thread:
                self._send_thread.kill()
                self._send_thread = None
            # Manually close the socket
            self._sock.close()
            self._sock = None
            # Make sure the manager knows we're closed
            super(TCPTendril, self).close()
            # Notify the application
            self.closed()
            # As our last step, commit seppuku; this will keep
            # _thread_error() from notifying the application of an
            # erroneous exit from the receive thread
            raise gevent.GreenletExit()
        # Process the received data
        self._recv_frameify(recv_buf)
|
def list_sensors(parent_class, sensor_items, filter, strategy, status,
                 use_python_identifiers, tuple, refresh):
    """Helper for implementing :meth:`katcp.resource.KATCPResource.list_sensors`.

    Parameters
    ----------
    sensor_items : tuple of sensor-item tuples
        As would be returned by the items() method of a dict containing
        KATCPSensor objects keyed by Python identifiers.
    parent_class : KATCPClientResource or KATCPClientResourceContainer
        Used for prefix calculation.

    The rest of the parameters are as for
    :meth:`katcp.resource.KATCPResource.list_sensors`.
    NOTE(review): `filter` and `tuple` shadow builtins but are part of
    the public signature, so they are kept.
    """
    pattern = re.compile(filter)
    matches = []
    none_strat = resource.normalize_strategy_parameters('none')
    sensors = dict(sensor_items)
    for identifier in sorted(sensors):
        sensor = sensors[identifier]
        searched = identifier if use_python_identifiers else sensor.name
        name_ok = pattern.search(searched)
        # when `strategy` is set, only include sensors with a strategy
        strat_ok = not strategy or sensor.sampling_strategy != none_strat
        if name_ok and strat_ok:
            if refresh:
                # refresh the sensor reading before reporting it
                yield sensor.get_value()
            # sensors in a container get a "parent_name." prefix,
            # except for aggregate ("agg_") sensors
            prefix = ""
            if isinstance(parent_class, KATCPClientResourceContainer):
                if not sensor.name.startswith("agg_"):
                    prefix = sensor.parent_name + "."
            # only include sensors of the requested status, if given
            if not status or sensor.reading.status in status:
                if tuple:
                    # (name, value, value_seconds, type, units,
                    #  update_seconds, status, strategy_and_params)
                    matches.append((
                        prefix + sensor.name,
                        sensor.reading.value,
                        sensor.reading.timestamp,
                        sensor.type,
                        sensor.units,
                        sensor.reading.received_timestamp,
                        sensor.reading.status,
                        sensor.sampling_strategy))
                else:
                    matches.append(resource.SensorResultTuple(
                        object=sensor,
                        name=prefix + sensor.name,
                        python_identifier=identifier,
                        description=sensor.description,
                        units=sensor.units,
                        type=sensor.type,
                        reading=sensor.reading))
    raise tornado.gen.Return(matches)
|
def _set_process_restart(self, v, load=False):
    """Setter method for process_restart, mapped from YANG variable
    /ha/process_restart (container).

    If this variable is read-only (config: false) in the source YANG
    file, then _set_process_restart is considered as a private method.
    Backends looking to populate this variable should do so via calling
    thisObj._set_process_restart() directly.
    """
    # Auto-generated pyangbind setter: coerce, validate and store the value.
    if hasattr(v, "_utype"):
        # unwrap a unified type into its base representation first
        v = v._utype(v)
    try:
        # validate by constructing the generated container type
        t = YANGDynClass(v, base=process_restart.process_restart, is_container='container', presence=False, yang_name="process-restart", rest_name="process-restart", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Enable process restart for fault recovery', u'display-when': u'((/local-node/swbd-number = "2000") or (/local-node/swbd-number = "2001") or (/local-node/swbd-number = "2002") or (/local-node/swbd-number = "2003") or (/local-node/swbd-number = "4000"))', u'cli-incomplete-no': None, u'cli-incomplete-command': None, u'callpoint': u'ha_callpoint'}}, namespace='urn:brocade.com:mgmt:brocade-ha', defining_module='brocade-ha', yang_type='container', is_config=True)
    except (TypeError, ValueError):
        # re-raise with the full generated-type description for the caller
        raise ValueError({'error-string': """process_restart must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=process_restart.process_restart, is_container='container', presence=False, yang_name="process-restart", rest_name="process-restart", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Enable process restart for fault recovery', u'display-when': u'((/local-node/swbd-number = "2000") or (/local-node/swbd-number = "2001") or (/local-node/swbd-number = "2002") or (/local-node/swbd-number = "2003") or (/local-node/swbd-number = "4000"))', u'cli-incomplete-no': None, u'cli-incomplete-command': None, u'callpoint': u'ha_callpoint'}}, namespace='urn:brocade.com:mgmt:brocade-ha', defining_module='brocade-ha', yang_type='container', is_config=True)""", })
    # store the validated value and notify the parent, if it supports it
    self.__process_restart = t
    if hasattr(self, '_set'):
        self._set()
|
def get_cb_plot(cb, plot=None):
    """Find the (sub)plot carrying the callback's triggering stream.

    Recurses into overlay subplots and returns the first subplot whose
    streams include one of the callback's currently-triggering streams;
    otherwise returns the plot itself.
    """
    plot = plot or cb.plot
    if isinstance(plot, GeoOverlayPlot):
        candidates = [get_cb_plot(cb, sp) for sp in plot.subplots.values()]
        triggering = [p for p in candidates
                      if any(s in cb.streams and getattr(s, '_triggering', False)
                             for s in p.streams)]
        if triggering:
            plot = triggering[0]
    return plot
|
def _check_str_value ( x ) :
"""If string has a space , wrap it in double quotes and remove / escape illegal characters"""
|
if isinstance ( x , str ) : # remove commas , and single quotation marks since loadarff cannot deal with it
x = x . replace ( "," , "." ) . replace ( chr ( 0x2018 ) , "'" ) . replace ( chr ( 0x2019 ) , "'" )
# put string in double quotes
if " " in x :
if x [ 0 ] in ( '"' , "'" ) :
x = x [ 1 : ]
if x [ - 1 ] in ( '"' , "'" ) :
x = x [ : len ( x ) - 1 ]
x = '"' + x . replace ( '"' , "\\\"" ) + '"'
return str ( x )
|
def preprocess_D_segs(self, generative_model, genomic_data):
    """Process P(delDl, delDr | D) into Pi arrays.

    Sets the attributes PD_nt_pos_vec, PD_2nd_nt_pos_per_aa_vec,
    min_delDl_given_DdelDr, max_delDl_given_DdelDr, and zeroD_given_D.

    Parameters
    ----------
    generative_model : GenerativeModelVDJ
        VDJ generative model class containing the model parameters.
    genomic_data : GenomicDataVDJ
        VDJ genomic data class containing the V, D, and J germline
        sequences and info.
    """
    cutD_genomic_CDR3_segs = genomic_data.cutD_genomic_CDR3_segs
    nt2num = {'A': 0, 'C': 1, 'G': 2, 'T': 3}
    num_dell_pos, num_delr_pos, num_D_genes = generative_model.PdelDldelDr_given_D.shape
    # These arrays only include the nt identity information, not the
    # PdelDldelDr_given_D info.  ([[]] * n is safe here: each slot is
    # reassigned below, never mutated in place.)
    PD_nt_pos_vec = [[]] * num_D_genes
    PD_2nd_nt_pos_per_aa_vec = [[]] * num_D_genes
    for D_in in range(num_D_genes):
        # One-hot (4 x seq_len) indicator of the nucleotide at each position
        # of this D gene's CDR3 segment.
        current_PD_nt_pos_vec = np.zeros((4, len(cutD_genomic_CDR3_segs[D_in])))
        current_PD_2nd_nt_pos_per_aa_vec = {}
        for aa in self.codons_dict.keys():
            current_PD_2nd_nt_pos_per_aa_vec[aa] = np.zeros((4, len(cutD_genomic_CDR3_segs[D_in])))
        for pos, nt in enumerate(cutD_genomic_CDR3_segs[D_in]):
            current_PD_nt_pos_vec[nt2num[nt], pos] = 1
            # Mark which inserted nucleotide, followed by the next (up to)
            # two germline nucleotides, completes a codon for amino acid aa.
            for ins_nt in 'ACGT':
                for aa in self.codons_dict.keys():
                    if ins_nt + cutD_genomic_CDR3_segs[D_in][pos:pos + 2] in self.codons_dict[aa]:
                        current_PD_2nd_nt_pos_per_aa_vec[aa][nt2num[ins_nt], pos] = 1
        PD_nt_pos_vec[D_in] = current_PD_nt_pos_vec
        PD_2nd_nt_pos_per_aa_vec[D_in] = current_PD_2nd_nt_pos_per_aa_vec
    min_delDl_given_DdelDr = [[]] * num_D_genes
    max_delDl_given_DdelDr = [[]] * num_D_genes
    zeroD_given_D = [[]] * num_D_genes
    for D_in in range(num_D_genes):
        current_min_delDl_given_delDr = [0] * num_delr_pos
        current_max_delDl_given_delDr = [0] * num_delr_pos
        current_zeroD = 0
        for delr in range(num_delr_pos):
            # Accumulate the probability that the D segment is entirely
            # deleted for this right-deletion amount.
            if num_dell_pos > len(cutD_genomic_CDR3_segs[D_in]) - delr:
                current_zeroD += generative_model.PdelDldelDr_given_D[len(cutD_genomic_CDR3_segs[D_in]) - delr, delr, D_in]
            # Scan upward for the smallest left-deletion with non-zero prob.
            dell = 0
            while generative_model.PdelDldelDr_given_D[dell, delr, D_in] == 0 and dell < num_dell_pos - 1:
                dell += 1
            if generative_model.PdelDldelDr_given_D[dell, delr, D_in] == 0:
                # -1 marks "no allowed left-deletion for this delr".
                current_min_delDl_given_delDr[delr] = -1
            else:
                current_min_delDl_given_delDr[delr] = dell
            if current_min_delDl_given_delDr[delr] == -1:
                current_max_delDl_given_delDr[delr] = -1
            else:
                # Scan downward for the largest left-deletion with non-zero prob.
                dell = num_dell_pos - 1
                while generative_model.PdelDldelDr_given_D[dell, delr, D_in] == 0 and dell >= 0:
                    dell -= 1
                if generative_model.PdelDldelDr_given_D[dell, delr, D_in] == 0:
                    current_max_delDl_given_delDr[delr] = -1
                else:
                    current_max_delDl_given_delDr[delr] = dell
        min_delDl_given_DdelDr[D_in] = current_min_delDl_given_delDr
        max_delDl_given_DdelDr[D_in] = current_max_delDl_given_delDr
        zeroD_given_D[D_in] = current_zeroD
    self.PD_nt_pos_vec = PD_nt_pos_vec
    self.PD_2nd_nt_pos_per_aa_vec = PD_2nd_nt_pos_per_aa_vec
    self.min_delDl_given_DdelDr = min_delDl_given_DdelDr
    self.max_delDl_given_DdelDr = max_delDl_given_DdelDr
    self.zeroD_given_D = zeroD_given_D
|
def response_builder(self, response):
    '''Return a compact dict built from the JSON query results.

    Falls back to the raw response content when the payload cannot be
    parsed into the expected {'query': {'results': ..., 'count': ...}}
    shape.
    '''
    try:
        payload = response.json()
        query = payload['query']
        return {'num_result': query['count'], 'result': query['results']}
    except Exception as exc:
        print(exc)
        return response.content
|
def status(self, job_ids):
    '''Get the status of a list of jobs identified by their ids.

    Polls every tracked (non-terminal) job's process and refreshes its
    cached status, then returns the statuses for the requested ids.

    Args:
        - job_ids (List of ids): List of identifiers for the jobs

    Returns:
        - List of status codes.
    '''
    logging.debug("Checking status of: %s", job_ids)
    for job_id in self.resources:
        record = self.resources[job_id]
        if record['status'] in ['COMPLETED', 'FAILED']:
            # Terminal states never change; no need to poll again.
            continue
        poll_code = record['proc'].poll()
        if poll_code is None:
            record['status'] = 'RUNNING'
        elif poll_code == 0:
            # Fix: the previous guard (status != 'RUNNING') meant a job once
            # marked RUNNING could never transition to COMPLETED.
            record['status'] = 'COMPLETED'
        else:
            # Any non-zero return code (including positive exit codes, which
            # the old `poll_code < 0` test missed) is a failure.
            record['status'] = 'FAILED'
    return [self.resources[jid]['status'] for jid in job_ids]
|
def parse_line(self, line: str) -> None:
    """Updates the dictionary with a single header line.

    >>> h = HTTPHeaders()
    >>> h.parse_line("Content-Type: text/html")
    >>> h.get('content-type')
    'text/html'
    """
    if line[0].isspace():
        # Continuation of a folded multi-line header value.
        if self._last_key is None:
            raise HTTPInputError("first header line cannot start with whitespace")
        continuation = " " + line.lstrip()
        self._as_list[self._last_key][-1] += continuation
        self._dict[self._last_key] += continuation
        return
    name, sep, value = line.partition(":")
    if not sep:
        raise HTTPInputError("no colon in header line")
    self.add(name, value.strip())
|
def delete_collection_certificate_signing_request(self, **kwargs):  # noqa: E501
    """Delete a collection of CertificateSigningRequest objects.  # noqa: E501

    Synchronous by default; pass ``async_req=True`` to perform the request
    asynchronously, in which case the request thread is returned instead of
    the result.

    >>> thread = api.delete_collection_certificate_signing_request(async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param bool include_uninitialized: Include partially initialized resources.
    :param str pretty: If 'true', pretty-print the output.
    :param str _continue: Continue token from a previous chunked list call.
    :param str field_selector: Restrict returned objects by their fields.
    :param str label_selector: Restrict returned objects by their labels.
    :param int limit: Maximum number of responses for a list call.
    :param str resource_version: Show changes after this resource version.
    :param int timeout_seconds: Timeout for the list/watch call.
    :param bool watch: Stream change notifications instead of listing.
    :return: V1Status
             If the method is called asynchronously, returns the request
             thread.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        return self.delete_collection_certificate_signing_request_with_http_info(**kwargs)  # noqa: E501
    data = self.delete_collection_certificate_signing_request_with_http_info(**kwargs)  # noqa: E501
    return data
|
def _Close ( self ) :
"""Closes the file - like object .
If the file - like object was passed in the init function the file
object - based file - like object does not control the file - like object
and should not actually close it ."""
|
if not self . _file_object_set_in_init :
try : # TODO : fix close being called for the same object multiple times .
self . _file_object . close ( )
except IOError :
pass
self . _file_object = None
|
def Guardar(self, tipo_doc, nro_doc, denominacion, cat_iva, direccion, email, imp_ganancias='NI', imp_iva='NI', monotributo='NI', integrante_soc='N', empleador='N'):
    "Add or update the client's data"
    # If the client already exists (matched by document type + number),
    # update the padron row in place; otherwise insert a new one.
    if self.Buscar(nro_doc, tipo_doc):
        sql = ("UPDATE padron SET denominacion=?, cat_iva=?, email=?, " "imp_ganancias=?, imp_iva=?, monotributo=?, " "integrante_soc=?, empleador=? " "WHERE tipo_doc=? AND nro_doc=?")
        params = [denominacion, cat_iva, email, imp_ganancias, imp_iva, monotributo, integrante_soc, empleador, tipo_doc, nro_doc]
    else:
        sql = ("INSERT INTO padron (tipo_doc, nro_doc, denominacion, " "cat_iva, email, imp_ganancias, imp_iva, monotributo, " "integrante_soc, empleador) " "VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)")
        params = [tipo_doc, nro_doc, denominacion, cat_iva, email, imp_ganancias, imp_iva, monotributo, integrante_soc, empleador]
    self.cursor.execute(sql, params)
    # add the address only if it does not already exist:
    if direccion:
        self.cursor.execute("SELECT * FROM domicilio WHERE direccion=? " "AND tipo_doc=? AND nro_doc=?", [direccion, tipo_doc, nro_doc])
        # NOTE(review): DB-API `rowcount` is -1 after a SELECT on many
        # drivers (e.g. sqlite3), so `rowcount < 0` does not reliably mean
        # "no row found" -- confirm the duplicate check against the driver.
        if self.cursor.rowcount < 0:
            sql = ("INSERT INTO domicilio (nro_doc, tipo_doc, direccion)" "VALUES (?, ?, ?)")
            self.cursor.execute(sql, [nro_doc, tipo_doc, direccion])
    self.db.commit()
    return True
|
def _pressed(self, evt):
    """Clicked somewhere in the calendar.

    Maps the click coordinates to a calendar cell, highlights the chosen
    day, and pushes the resulting datetime into the target element.
    """
    x, y, widget = evt.x, evt.y, evt.widget
    item = widget.identify_row(y)
    column = widget.identify_column(x)
    if not column or not item in self._items:
        # clicked in the weekdays row or just outside the columns
        return
    item_values = widget.item(item)['values']
    if not len(item_values):
        # row is empty for this month
        return
    # column is of the form '#N'; use the digit as a 1-based column index
    # (assumes at most 9 columns, which holds for a 7-day week grid).
    text = item_values[int(column[1]) - 1]
    if not text:
        # date is empty
        return
    bbox = widget.bbox(item, column)
    if not bbox:
        # calendar not visible yet
        return
    # update and then show selection
    text = '%02d' % text
    self._selection = (text, item, column)
    self._show_selection(text, bbox)
    year, month = self._date.year, self._date.month
    try:
        self._TargetElement.Update(self.datetime(year, month, int(self._selection[0])))
        if self._TargetElement.ChangeSubmits:
            self._TargetElement.ParentForm.LastButtonClicked = self._TargetElement.Key
            self._TargetElement.ParentForm.FormRemainedOpen = True
            # kick the users out of the mainloop
            self._TargetElement.ParentForm.TKroot.quit()
    except:
        # NOTE(review): bare except silently swallows any failure while
        # notifying the target element -- consider narrowing to the
        # expected exception types.
        pass
    if self.close_when_chosen:
        self._master.destroy()
|
def rpc_get_subdomain_ops_at_txid(self, txid, **con_info):
    """Return the list of subdomain operations accepted within a given txid.

    Return {'status': True, 'subdomain_ops': [{...}]} on success
    Return {'error': ...} on error
    """
    is_valid_txid = check_string(txid, min_length=64, max_length=64,
                                 pattern='^[0-9a-fA-F]{64}$')
    if not is_valid_txid:
        return {'error': 'Not a valid txid', 'http_status': 400}
    return self.success_response({'subdomain_ops': get_subdomain_ops_at_txid(txid)})
|
def _get_calibration_for_mchits ( hits , lookup ) :
"""Append the position , direction and t0 columns and add t0 to time"""
|
n_hits = len ( hits )
cal = np . empty ( ( n_hits , 9 ) )
for i in range ( n_hits ) :
cal [ i ] = lookup [ hits [ 'pmt_id' ] [ i ] ]
dir_x = cal [ : , 3 ]
dir_y = cal [ : , 4 ]
dir_z = cal [ : , 5 ]
du = cal [ : , 7 ]
floor = cal [ : , 8 ]
pos_x = cal [ : , 0 ]
pos_y = cal [ : , 1 ]
pos_z = cal [ : , 2 ]
t0 = cal [ : , 6 ]
return [ dir_x , dir_y , dir_z , du , floor , pos_x , pos_y , pos_z , t0 ]
|
async def create_playlist(self, name, *songs):
    '''Create a new playlist.

    |coro|

    Parameters
    ----------
    name : str
        name of new playlist
    songs : array_like
        list of song ids to add to playlist
    '''
    payload = {'Name': name}
    resolved = await self.process(songs)
    song_ids = [song.id for song in resolved]
    if song_ids:
        payload['Ids'] = ','.join(song_ids)
    # TODO - return playlist not status
    return await self.connector.post('/Playlists', data=payload,
                                     pass_uid=True, remote=False)
|
def update_persistent_boot(self, devices=None, persistent=False):
    """Changes the persistent boot device order in BIOS boot mode for host

    Note: It uses first boot device from the devices and ignores rest.

    :param devices: ordered list of boot devices
    :param persistent: Boolean flag to indicate if the device to be set as
        a persistent boot device
    :raises: IloError, on an error from iLO.
    :raises: IloInvalidInputError, if the given input is not valid.
    """
    # Fix: the original used a mutable default argument (devices=[]) and
    # would crash with IndexError on an empty list; validate explicitly and
    # raise the documented exception instead.
    if not devices:
        raise exception.IloInvalidInputError('No boot devices provided.')
    device = PERSISTENT_BOOT_DEVICE_MAP.get(devices[0].upper())
    if device == sushy.BOOT_SOURCE_TARGET_UEFI_TARGET:
        # UEFI iSCSI boot requires selecting a concrete iSCSI target device.
        try:
            uefi_devices = self.uefi_target_override_devices
            iscsi_device = None
            for uefi_device in uefi_devices:
                if uefi_device is not None and 'iSCSI' in uefi_device:
                    iscsi_device = uefi_device
                    break
            if iscsi_device is None:
                msg = 'No UEFI iSCSI bootable device found on system.'
                raise exception.IloError(msg)
        except sushy.exceptions.SushyError as e:
            msg = ('Unable to get uefi target override devices. '
                   'Error %s') % (str(e))
            raise exception.IloError(msg)
        uefi_boot_settings = {'Boot': {'UefiTargetBootSourceOverride': iscsi_device}}
        self._conn.patch(self.path, data=uefi_boot_settings)
    elif device is None:
        # Unknown device name: fall back to "no override".
        device = sushy.BOOT_SOURCE_TARGET_NONE
    tenure = (sushy.BOOT_SOURCE_ENABLED_CONTINUOUS
              if persistent else sushy.BOOT_SOURCE_ENABLED_ONCE)
    self.set_system_boot_source(device, enabled=tenure)
|
def is_nonterminal(self, symbol: str) -> bool:
    """Determines whether an input symbol is a valid non-terminal in the grammar."""
    return symbol in self.get_nonterminal_productions()
|
def get_networks(parent_ref, network_names=None, get_all_networks=False):
    '''Returns networks of standard switches.

    The parent object can be a datacenter.

    parent_ref
        The parent object reference. A datacenter object.

    network_names
        The name of the standard switch networks. Default is None.

    get_all_networks
        Boolean indicates whether to return all networks in the parent.
        Default is False.
    '''
    if not isinstance(parent_ref, vim.Datacenter):
        raise salt.exceptions.ArgumentValueError('Parent has to be a datacenter.')
    parent_name = get_managed_object_name(parent_ref)
    log.trace('Retrieving network from %s \'%s\', network_names=\'%s\', '
              'get_all_networks=%s',
              type(parent_ref).__name__, parent_name,
              ','.join(network_names) if network_names else None,
              get_all_networks)
    service_instance = get_service_instance_from_managed_object(parent_ref)
    # Traverse datacenter -> networkFolder -> child folders to find networks.
    folder_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='childEntity', skip=False, type=vim.Folder)
    traversal_spec = vmodl.query.PropertyCollector.TraversalSpec(
        path='networkFolder', skip=True, type=vim.Datacenter,
        selectSet=[folder_spec])
    networks = []
    for entry in get_mors_with_properties(service_instance, vim.Network,
                                          container_ref=parent_ref,
                                          property_list=['name'],
                                          traversal_spec=traversal_spec):
        if get_all_networks or (network_names and entry['name'] in network_names):
            networks.append(entry['object'])
    return networks
|
def upgrade_bootstrap(directory='.', onlyif=None, unless=None, runas=None, env=(), offline=False, buildout_ver=None):
    '''Upgrade current bootstrap.py with the last released one.

    Indeed, when we first run a buildout, a common source of problem
    is to have a locally stale bootstrap, we just try to grab a new copy

    directory
        directory to execute in

    offline
        are we executing buildout in offline mode

    buildout_ver
        forcing to use a specific buildout version (1 | 2)

    onlyif
        Only execute cmd if statement on the host return 0

    unless
        Do not execute cmd if statement on the host return 0

    CLI Example:

    .. code-block:: bash

        salt '*' buildout.upgrade_bootstrap /srv/mybuildout
    '''
    # Resolve which bootstrap.py URL to download, either forced via
    # buildout_ver or inferred from the buildout found in `directory`.
    if buildout_ver:
        booturl = _URL_VERSIONS[buildout_ver]
    else:
        buildout_ver = _get_buildout_ver(directory)
        booturl = _get_bootstrap_url(directory)
    LOG.debug('Using {0}'.format(booturl))
    # pylint: disable=str-format-in-logging
    # try to download an up-to-date bootstrap
    # set defaulttimeout
    # and add possible content
    directory = os.path.abspath(directory)
    b_py = os.path.join(directory, 'bootstrap.py')
    comment = ''
    try:
        oldcontent = _get_bootstrap_content(directory)
        dbuild = _dot_buildout(directory)
        data = oldcontent
        updated = False
        dled = False
        if not offline:
            try:
                if not os.path.isdir(dbuild):
                    os.makedirs(dbuild)
                # only try to download once per buildout checkout: opening
                # the marker file raises IOError when it does not exist yet,
                # which triggers the download in the except branch below.
                with salt.utils.files.fopen(os.path.join(dbuild, '{0}.updated_bootstrap'.format(buildout_ver))):
                    pass
            except (OSError, IOError):
                LOG.info('Bootstrap updated from repository')
                data = _urlopen(booturl).read()
                updated = True
                dled = True
        # Inject a socket timeout into the bootstrap script so that it
        # cannot hang indefinitely on network access.
        if 'socket.setdefaulttimeout' not in data:
            updated = True
            ldata = data.splitlines()
            ldata.insert(1, 'import socket;socket.setdefaulttimeout(2)')
            data = '\n'.join(ldata)
        if updated:
            comment = 'Bootstrap updated'
            with salt.utils.files.fopen(b_py, 'w') as fic:
                fic.write(salt.utils.stringutils.to_str(data))
        if dled:
            # Drop the per-version marker file so later runs skip the download.
            with salt.utils.files.fopen(os.path.join(dbuild, '{0}.updated_bootstrap'.format(buildout_ver)), 'w') as afic:
                afic.write('foo')
    except (OSError, IOError):
        # Restore the previous bootstrap on any filesystem error.
        # NOTE(review): if _get_bootstrap_content itself raised, `oldcontent`
        # is unbound here and this branch raises NameError -- confirm intended.
        if oldcontent:
            with salt.utils.files.fopen(b_py, 'w') as fic:
                fic.write(salt.utils.stringutils.to_str(oldcontent))
    return {'comment': comment}
|
def _get_longest_hit_at_qry_end ( self , nucmer_hits ) :
'''Input : list of nucmer hits to the same query . Returns the longest hit to the end of the query , or None if there is no such hit'''
|
hits_at_end = [ hit for hit in nucmer_hits if self . _is_at_qry_end ( hit ) ]
return self . _get_longest_hit_by_ref_length ( hits_at_end )
|
def log(self, revrange=None, limit=None, firstparent=False, merges=None, path=None, follow=False):
    """Get commit logs.

    Abstract: concrete VCS backends must override this method.

    :param revrange: Either a single revision or a range of revisions as a
        2-element list or tuple.
    :param int limit: Limit the number of log entries.
    :param bool firstparent: Only follow the first parent of merges.
    :param bool merges: True means only merges, False means no merges,
        None means both merges and non-merges.
    :param str path: Only match commits containing changes on this path.
    :param bool follow: Follow file history across renames.
    :returns: log information
    :rtype: :class:`CommitLogEntry` or list of :class:`CommitLogEntry`

    If revrange is None, return a list of all log entries in reverse
    chronological order.

    If revrange is a single revision, return a single log entry.

    If revrange is a 2 element list [A, B] or tuple (A, B), return a list
    of log entries starting at B and following that branch back to A or one
    of its ancestors (not inclusive). If A is None, follow branch B back to
    the beginning of history. If B is None, list all descendants in reverse
    chronological order.
    """
    raise NotImplementedError
|
def prepare_patchset(project, patchset, binaries, ips, urls):
    """Create black/white lists and default/project waivers
    and iterates over patchset file.

    :param project: project name used to select the configured lists/waivers
    :param patchset: path to a file listing one patch file path per line
    :param binaries: enable binary scanning (requires a virustotal API key)
    :param ips: enable IP scanning (requires a virustotal API key)
    :param urls: enable URL scanning (requires a virustotal API key)
    """
    # Get Various Lists / Project Waivers
    lists = get_lists.GetLists()
    # Get file name black list and project waivers
    file_audit_list, file_audit_project_list = lists.file_audit_list(project)
    # Get file content black list and project waivers
    flag_list, ignore_list = lists.file_content_list(project)
    # Get URL / IP ignore lists
    url_ignore = lists.url_ignore(project)
    ip_ignore = lists.ip_ignore(project)
    # Get file / directory ignore lists
    file_ignore = lists.file_ignore()
    ignore_directories = lists.ignore_directories(project)
    if binaries or ips or urls:
        # Any of the virustotal-based scans needs an API key and rate type.
        try:
            apikey = os.environ["VT_KEY"]
        except KeyError:
            logger.error("Please set your virustotal.com API key as an environment variable")
            sys.exit(1)
        try:
            vt_rate_type = config.get('config', 'vt_rate_type')
        except six.moves.configparser.NoSectionError:
            logger.error("A config section is required for vt_rate_type with a public | private option ")
            sys.exit(1)
        pattern = re.compile(r'\bpublic\b|\bprivate\b')
        if not pattern.match(vt_rate_type):
            logger.error("Unrecognized %s option for vt_rate_type", vt_rate_type)
            sys.exit(1)
    else:
        apikey = ""
    # Open patch set to get file list; `with` guarantees the handle is
    # closed (fix: the original left the file object open).
    try:
        with open(patchset, 'r') as patchset_file:
            lines = patchset_file.readlines()
    except IOError:
        logger.error('%s does not exist', patchset)
        sys.exit(1)
    for line in lines:
        patch_file = line.strip('\n')
        # Perform binary and file/content checks
        scan_patch(project, patch_file, binaries, ips, urls, file_audit_list,
                   file_audit_project_list, flag_list, ignore_list, file_ignore,
                   ignore_directories, url_ignore, ip_ignore, apikey)
    # Process final result
    process_failure(project)
|
def mmap_move(fileobj, dest, src, count):
    """Mmaps the file object if possible and moves 'count' data
    from 'src' to 'dest'. All data has to be inside the file size
    (enlarging the file through this function isn't possible)

    Will adjust the file offset.

    Args:
        fileobj (fileobj)
        dest (int): The destination offset
        src (int): The source offset
        count (int): The amount of data to move
    Raises:
        mmap.error: In case move failed
        IOError: In case an operation on the fileobj fails
        ValueError: In case invalid parameters were given
    """
    assert mmap is not None, "no mmap support"

    if min(dest, src, count) < 0:
        raise ValueError("Invalid parameters")

    try:
        fileno = fileobj.fileno()
    except (AttributeError, IOError):
        raise mmap.error("File object does not expose/support a file descriptor")

    # Determine the file size; this intentionally leaves the offset at EOF.
    fileobj.seek(0, 2)
    filesize = fileobj.tell()
    length = max(dest, src) + count
    if length > filesize:
        raise ValueError("Not in file size boundary")

    # Map from the largest allocation-granularity boundary at or below the
    # lower of the two offsets.
    offset = (min(dest, src) // mmap.ALLOCATIONGRANULARITY) * mmap.ALLOCATIONGRANULARITY
    assert dest >= offset
    assert src >= offset
    assert offset % mmap.ALLOCATIONGRANULARITY == 0

    # Windows doesn't handle empty mappings; a same-offset move is a no-op.
    if count == 0 or src == dest:
        return

    fileobj.flush()
    file_map = mmap.mmap(fileno, length - offset, offset=offset)
    try:
        file_map.move(dest - offset, src - offset, count)
    finally:
        file_map.close()
|
def describe_keypairs(self, *keypair_names):
    """Returns information about key pairs available."""
    # EC2-style query parameters: KeyName.1, KeyName.2, ...
    params = {"KeyName.%d" % (n + 1): name
              for n, name in enumerate(keypair_names)}
    query = self.query_factory(action="DescribeKeyPairs", creds=self.creds,
                               endpoint=self.endpoint, other_params=params)
    deferred = query.submit()
    return deferred.addCallback(self.parser.describe_keypairs)
|
def set_display_name(self, display_name):
    """Sets a display name.

    A display name is required and if not set, will be set by the
    provider.

    arg:    display_name (string): the new display name
    raise:  InvalidArgument - ``display_name`` is invalid
    raise:  NoAccess - ``Metadata.isReadonly()`` is ``true``
    raise:  NullArgument - ``display_name`` is ``null``
    *compliance: mandatory -- This method must be implemented.*
    """
    metadata = self.get_display_name_metadata()
    self._my_map['displayName'] = self._get_display_text(display_name, metadata)
|
def get_recipe(self, recipe_name):
    """Get a recipe by name.

    Args:
        recipe_name (str): The name of the recipe to fetch. Can be either the
            yaml file name or the name of the recipe.
    """
    if recipe_name.endswith('.yaml'):
        # Parse the yaml file just to learn the recipe's declared name.
        parsed = RecipeObject.FromFile(recipe_name, self._recipe_actions,
                                       self._recipe_resources)
        key = parsed.name
    else:
        key = recipe_name
    recipe = self._recipes.get(key)
    if recipe is None:
        raise RecipeNotFoundError("Could not find recipe",
                                  recipe_name=recipe_name,
                                  known_recipes=list(self._recipes.keys()))
    return recipe
|
def sg_seek_streamer(self, index, force, value):
    """Acknowledge a streamer, returning the error code in a one-item list."""
    err = self.sensor_graph.acknowledge_streamer(index, value, bool(force))
    return [err]
|
def suspend(self):
    """Suspends execution on all threads of the process.

    On failure, every thread suspended so far is resumed again before the
    exception is re-raised, so the operation is all-or-nothing.

    @raise WindowsError: On error an exception is raised.
    """
    # force refresh the snapshot
    self.scan_threads()
    already_suspended = []
    try:
        for thread in self.iter_threads():
            thread.suspend()
            already_suspended.append(thread)
    except Exception:
        # Roll back: resume what we managed to suspend, then re-raise.
        for thread in already_suspended:
            try:
                thread.resume()
            except Exception:
                pass
        raise
|
def SavePrivateKey(self, private_key):
    """Store the new private key on disk."""
    self.private_key = private_key
    serialized = self.private_key.SerializeToString()
    config.CONFIG.Set("Client.private_key", serialized)
    config.CONFIG.Write()
|
def get_file(self):
    """Load data into a file and return file path.

    :return: path to the written file as a string, or None when no
        content is available.
    """
    content = self._load()
    if not content:
        return None
    # Fix: the previous hard-coded "temporary_file.bin" in the current
    # working directory collided between concurrent calls and polluted the
    # CWD; use a unique temporary file instead.
    fd, path = tempfile.mkstemp(suffix=".bin")
    with os.fdopen(fd, "wb") as handle:
        handle.write(content)
    return path
|
def get_channel_comment(self, name=None, group=None, index=None):
    """Gets channel comment.

    Channel can be specified in two ways:

    * using the first positional argument *name*

      * if there are multiple occurances for this channel then the
        *group* and *index* arguments can be used to select a specific
        group.
      * if there are multiple occurances for this channel and either the
        *group* or *index* arguments is None then a warning is issued

    * using the group number (keyword argument *group*) and the channel
      number (keyword argument *index*). Use *info* method for group and
      channel numbers

    Parameters
    ----------
    name : string
        name of channel
    group : int
        0-based group index
    index : int
        0-based channel index

    Returns
    -------
    comment : str
        found channel comment
    """
    gp_nr, ch_nr = self._validate_channel_selection(name, group, index)
    # Fix: the stream selection previously computed here was dead code —
    # the comment is stored on the channel object itself and no file
    # access is required.
    channel = self.groups[gp_nr].channels[ch_nr]
    return channel.comment
|
def allows_simple_recursion(self):
    """Check recursion level and extern status."""
    max_level = self.aggregate.config["recursionlevel"]
    # A negative configured level means "unlimited".
    if 0 <= max_level <= self.recursion_level:
        log.debug(LOG_CHECK, "... no, maximum recursion level reached.")
        return False
    if self.extern[0]:
        log.debug(LOG_CHECK, "... no, extern.")
        return False
    return True
|
def _process_gradient_args(f, kwargs):
    """Handle common processing of arguments for gradient and gradient-like functions.

    Returns a (mode, positions, axes) triple where ``mode`` tells the
    caller which keyword ('delta' or 'x') to forward, or 'pass' when ``f``
    is a DataArray that carries its own coordinate information.
    """
    # Default to operating over every dimension of f.
    axes = kwargs.get('axes', range(f.ndim))

    def _check_length(positions):
        # With an explicit `axes`, fewer positions than axes is an error;
        # without `axes`, the counts must match exactly.
        if 'axes' in kwargs and len(positions) < len(axes):
            raise ValueError('Length of "coordinates" or "deltas" cannot be less than that '
                             'of "axes".')
        elif 'axes' not in kwargs and len(positions) != len(axes):
            raise ValueError('Length of "coordinates" or "deltas" must match the number of '
                             'dimensions of "f" when "axes" is not given.')

    if 'deltas' in kwargs:
        if 'coordinates' in kwargs or 'x' in kwargs:
            raise ValueError('Cannot specify both "coordinates" and "deltas".')
        _check_length(kwargs['deltas'])
        return 'delta', kwargs['deltas'], axes
    elif 'coordinates' in kwargs:
        _check_length(kwargs['coordinates'])
        return 'x', kwargs['coordinates'], axes
    elif 'x' in kwargs:
        # Legacy spelling of "coordinates"; warn but keep working.
        warnings.warn('The use of "x" as a parameter for coordinate values has been '
                      'deprecated. Use "coordinates" instead.', metpyDeprecation)
        _check_length(kwargs['x'])
        return 'x', kwargs['x'], axes
    elif isinstance(f, xr.DataArray):
        # only the axis argument matters
        return 'pass', axes, axes
    else:
        raise ValueError('Must specify either "coordinates" or "deltas" for value positions '
                         'when "f" is not a DataArray.')
|
def from_array3D(filename, data, iline=189, xline=193, format=SegySampleFormat.IBM_FLOAT_4_BYTE, dt=4000, delrt=0):
    """Create a new SEGY file from a 3D array.

    Create a structured SEGY file with defaulted headers from a
    3-dimensional array. The file is inline-sorted. ilines, xlines and
    samples are inferred from the array. Structure-defining fields in the
    binary header and in the traceheaders are set accordingly, including
    but not limited to iline, xline and offset. The file also contains a
    defaulted textual header.

    The 3-dimensional array is interpreted as::

                      xl0   xl1   xl2
                 /   | tr0 | tr1 | tr2 |  il0
                |   /| tr3 | tr4 | tr5 |  il1
                |  / | tr6 | tr7 | tr8 |  il2
                | /  /     /     /
                 n-samples

        ilines  = [1, len(axis(0) + 1]
        xlines  = [1, len(axis(1) + 1]
        samples = [0, len(axis(2)]

    Parameters
    ----------
    filename : string-like
        Path to new file
    data : 3-dimensional array-like
    iline : int or segyio.TraceField
        Inline number field in the trace headers. Defaults to 189 as per
        the SEG-Y rev1 specification
    xline : int or segyio.TraceField
        Crossline number field in the trace headers. Defaults to 193 as
        per the SEG-Y rev1 specification
    format : int or segyio.SegySampleFormat
        Sample format field in the trace header. Defaults to IBM float 4 byte
    dt : int-like
        sample interval
    delrt : int-like

    Notes
    -----
    .. versionadded:: 1.8

    Examples
    --------
    Create a file from a 3D array, open it and read an iline:

    >>> segyio.tools.from_array3D(path, array3d)
    >>> segyio.open(path, mode) as f:
    ...     iline = f.iline[0]
    """
    data = np.asarray(data)
    if data.ndim != 3:
        raise ValueError("Expected 3 dimensions, {} was given".format(data.ndim))
    from_array(filename, data, iline=iline, xline=xline, format=format,
               dt=dt, delrt=delrt)
|
def create_user(backend, details, response, uid, username, user=None, *args, **kwargs):
    """Creates user. Depends on get_username pipeline."""
    if user:
        # A user already exists; nothing to create.
        return {'user': user}
    if not username:
        return None
    email = details.get('email')
    original_email = None
    if not email:
        # A verified email address is mandatory for account creation.
        message = _("""your social account needs to have a verified email address in order to proceed.""")
        raise AuthFailed(backend, message)
    if len(email) > 75:
        # Avoid hitting the field max length; keep the original for later.
        original_email, email = email, ''
    new_user = UserSocialAuth.create_user(username=username, email=email,
                                          sync_emailaddress=False)
    return {'user': new_user, 'original_email': original_email, 'is_new': True}
|
def display_completions_like_readline(event):
    """Key binding handler for readline-style tab completion.

    This is meant to be as similar as possible to the way readline displays
    completions: generate them immediately (blocking) and display them above
    the prompt in columns.

    Usage::

        # Call this handler when 'Tab' has been pressed.
        registry.add_binding(Keys.ControlI)(display_completions_like_readline)
    """
    buf = event.current_buffer
    if buf.completer is None:
        return
    # Generate completions synchronously, as readline does.
    completions = list(buf.completer.get_completions(
        buf.document, CompleteEvent(completion_requested=True)))
    common_suffix = get_common_complete_suffix(buf.document, completions)
    if len(completions) == 1:
        # Exactly one match: insert it outright.
        only = completions[0]
        buf.delete_before_cursor(-only.start_position)
        buf.insert_text(only.text)
    elif common_suffix:
        # Several matches sharing a common part: insert the shared part.
        buf.insert_text(common_suffix)
    elif completions:
        # Otherwise display all completions above the prompt.
        _display_completions_like_readline(event.cli, completions)
|
def get_story(self, id):
    """Fetch a single story by id.

    GET /v1/public/stories/{storyId}

    :param id: ID of Story
    :type id: int
    :returns: StoryDataWrapper

    >>> m = Marvel(public_key, private_key)
    >>> response = m.get_story(29)
    >>> print response.data.result.title
    Caught in the heart of a nuclear explosion, ...
    """
    resource = "%s/%s" % (Story.resource_url(), id)
    body = json.loads(self._call(resource).text)
    return StoryDataWrapper(self, body)
|
def format_output(func):
    """Decorator that prints ``func``'s result and exits the process.

    On success the return value is printed to stdout and the process exits
    with status 0; on any exception the error is printed in red to stderr
    and the process exits with status 1.
    """
    # NOTE: the original body had an early ``return func`` before the wrapper
    # definition, which made the decorator a no-op (the wrapper below was
    # unreachable dead code). That premature return has been removed.
    @wraps(func)
    def wrapper(*args, **kwargs):
        try:
            response = func(*args, **kwargs)
        except Exception as error:
            print(colored(error, 'red'), file=sys.stderr)
            sys.exit(1)
        else:
            print(response)
            sys.exit(0)
    return wrapper
|
def get_index_labels(self, targets):
    """Get the labels (known target/not) mapped to indices.

    :param targets: List of known targets
    :return: Dictionary of index-label mappings
    """
    labels = {}
    # Known targets get label 1, everything else gets label 0.
    for idx in self.graph.vs.select(name_in=targets).indices:
        labels[idx] = 1
    for idx in self.graph.vs.select(name_notin=targets).indices:
        labels[idx] = 0
    return labels
|
def backend_from_fname(name):
    """Determine backend module object from a file name."""
    ext = splitext(name)[1]
    if ext in EXTS_TO_MIMETYPES:
        # Known extension: map straight to its backend module.
        return import_mod(MIMETYPE_TO_BACKENDS[EXTS_TO_MIMETYPES[ext]])
    # Unknown extension: sniff the content instead, if the file exists.
    try:
        f = open(name, 'rb')
    except IOError as e:
        # The file may not exist; we are being asked to determine its type
        # from its name. Other errors are unexpected.
        if e.errno != errno.ENOENT:
            raise
    else:
        with f:
            return backend_from_fobj(f)
    # We will have to fall back upon the default backend.
    msg = "No handler for %r, defaulting to %r" % (ext, DEFAULT_MIME)
    if 'FULLTEXT_TESTING' in os.environ:
        warn(msg)
    else:
        LOGGER.debug(msg)
    return import_mod(MIMETYPE_TO_BACKENDS[DEFAULT_MIME])
|
def create(self, data, **kwargs):
    """Create a new object.

    Args:
        data (dict): parameters to send to the server to create the
            resource
        **kwargs: Extra options to send to the server (e.g. sudo)

    Returns:
        RESTObject: a new instance of the managed object class built with
        the data sent by the server

    Raises:
        GitlabAuthenticationError: If authentication is not correct
        GitlabCreateError: If the server cannot perform the request
    """
    self._check_missing_create_attrs(data)
    files = {}
    # Apply any declared type transformations before sending.
    types = getattr(self, '_types', {})
    if types:
        # Work on a copy to avoid mutating the caller's dict.
        data = data.copy()
        for attr_name, type_cls in types.items():
            if attr_name not in data:
                continue
            type_obj = type_cls(data[attr_name])
            if issubclass(type_cls, g_types.FileAttribute):
                # FileAttribute values are sent as file uploads.
                file_name = type_obj.get_file_name(attr_name)
                files[attr_name] = (file_name, data.pop(attr_name))
            else:
                data[attr_name] = type_obj.get_for_api()
    # A specific creation URL may be supplied via kwargs.
    path = kwargs.pop('path', self.path)
    server_data = self.gitlab.http_post(path, post_data=data, files=files, **kwargs)
    return self._obj_cls(self, server_data)
|
def getPrivilegeForRole(self, rolename):
    """Returns the privilege associated with a role.

    Input:
        rolename - name of the role
    Output:
        JSON Messages
    """
    privilege_url = self._url + "/roles/getPrivilege"
    return self._post(url=privilege_url,
                      param_dict={"f": "json", "rolename": rolename},
                      securityHandler=self._securityHandler,
                      proxy_url=self._proxy_url,
                      proxy_port=self._proxy_port)
|
def client_for(service, service_module, thrift_service_name=None):
    """Build a synchronous client class for the given Thrift service.

    The generated class accepts a TChannelSyncClient and an optional
    hostport as initialization arguments.

    :param service:
        Name of the Hyperbahn service being called (may be blank for
        non-Hyperbahn use cases).
    :param service_module:
        The Thrift-generated module for that service. This usually has
        the same name as defined for the service in the IDL.
    :param thrift_service_name:
        If the Thrift service has a different name than its module, use
        this parameter to specify it.
    :returns:
        A Thrift-like class, ready to be instantiated and used
        with TChannelSyncClient.
    """
    assert service_module, 'service_module is required'
    service = service or ''
    if not thrift_service_name:
        # Default to the trailing component of the module name.
        thrift_service_name = service_module.__name__.rsplit('.', 1)[-1]

    def init(self, tchannel, hostport=None, trace=False, protocol_headers=None):
        self.async_thrift = self.__async_client_class__(
            tchannel=tchannel,
            hostport=hostport,
            trace=trace,
            protocol_headers=protocol_headers,
        )
        self.threadloop = tchannel._threadloop
    init.__name__ = '__init__'

    members = {
        '__init__': init,
        '__async_client_class__': async_client_for(
            service=service,
            service_module=service_module,
            thrift_service_name=thrift_service_name,
        ),
    }
    for method_name in get_service_methods(service_module.Iface):
        members[method_name] = generate_method(method_name)
    return type(thrift_service_name + 'Client', (object,), members)
|
def hmsStrToDeg(ra):
    """Convert a string representation of RA ("H:M:S") into a float in degrees."""
    hours, minutes, seconds = ra.split(':')
    return hmsToDeg(int(hours), int(minutes), float(seconds))
|
def get_vault_query_session(self, proxy):
    """Gets the OsidSession associated with the vault query service.

    arg:    proxy (osid.proxy.Proxy): a proxy
    return: (osid.authorization.VaultQuerySession) - a
            ``VaultQuerySession``
    raise:  NullArgument - ``proxy`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  Unimplemented - ``supports_vault_query() is false``
    *compliance: optional -- This method must be implemented if
    ``supports_vault_query()`` is true.*
    """
    if self.supports_vault_query():
        # pylint: disable=no-member
        return sessions.VaultQuerySession(proxy=proxy, runtime=self._runtime)
    raise errors.Unimplemented()
|
def create_key(file_):
    """Create a 2048-bit RSA key and save it into ``file_``.

    Note that ``file_`` must be opened in binary mode.
    """
    key = crypto.PKey()
    key.generate_key(crypto.TYPE_RSA, 2048)
    # Serialize as PEM and make sure it hits the underlying file.
    file_.write(crypto.dump_privatekey(crypto.FILETYPE_PEM, key))
    file_.flush()
|
def summarycanvas(args):
    """%prog summarycanvas output.vcf.gz
    Generate tag counts (GAIN/LOSS/REF/LOH) of segments in Canvas output."""
    # NOTE: the docstring doubles as the OptionParser usage text.
    p = OptionParser(summarycanvas.__doc__)
    opts, args = p.parse_args(args)
    if len(args) < 1:
        sys.exit(not p.print_help())
    for vcffile in args:
        counter = get_gain_loss_summary(vcffile)
        prefix = op.basename(vcffile).split(".")[0]
        tags = " ".join("{}:{}".format(k, v) for k, v in sorted(counter.items()))
        print(prefix + " " + tags)
|
def set_row_height(self, n=0, height=18):
    """Set the n'th row height in pixels.

    Returns self to allow call chaining.
    """
    widget = self._widget
    widget.setRowHeight(n, height)
    return self
|
def format_fullwidth(self, value):
    """Return a full width column.

    Note that the padding is inherited from the first cell, which inherits
    from column_padding.
    """
    assert isinstance(value, VTMLBuffer)
    padding = self.colspec[0]['padding']
    formatter = self.make_formatter(self.width - padding, padding,
                                    self.table.title_align)
    return VTMLBuffer('\n').join(formatter(value))
|
def wait_transmit(self, *ports):
    """Wait for traffic end on ports.

    :param ports: list of ports to wait for; if empty, wait for all ports.
    """
    ports_arg = self.set_ports_list(*ports)
    self.api.call_rc('ixCheckTransmitDone {}'.format(ports_arg))
|
def rmtree(self, path):
    """Removes directory structure, similar to shutil.rmtree."""
    # Walk bottom-up so children are freed before their parents.
    for root, storage, streams in self.walk(path, topdown=False):
        for stream in streams:
            # Small streams live in the mini-FAT; pick the right chain.
            self.free_fat_chain(stream.sector_id,
                                stream.byte_size < self.min_stream_max_size)
            self.free_dir_entry(stream)
        for entry in storage:
            self.free_dir_entry(entry)
        root.child_id = None
    # Finally remove the root item itself.
    self.remove(path)
|
def PatchAt(cls, n, module, method_wrapper=None, module_alias=None, method_name_modifier=utils.identity, blacklist_predicate=_False, whitelist_predicate=_True, return_type_predicate=_None, getmembers_predicate=inspect.isfunction, admit_private=False, explanation=""):
    """Patch all functions/callables from ``module`` as methods of this Builder class.

    **Arguments**

    * **n**: the position the object being piped will take in the arguments
      when the patched function is applied. See ``RegisterMethod`` and ``ThenAt``.
    * **module**: a module or class from which the functions/methods/callables
      will be taken.
    * ``module_alias=None``: an optional alias for the module used for
      documentation purposes.
    * ``method_name_modifier``: a function that can modify the name the method
      will take; if it yields ``None`` the original function name is used.
    * ``blacklist_predicate``: a predicate (or list of names) banning functions
      by name; by default names starting with ``'_'`` are excluded.
    * ``whitelist_predicate``: a predicate (or list of names) admitting
      functions by name; by default any function is included. Both predicates
      may be used at the same time.
    * ``return_type_predicate``: a predicate determining the ``_return_type``
      of the Builder; defaults to ``None``. See ``phi.builder.Builder.ThenAt``.
    * ``getmembers_predicate``: a predicate that determines what member types
      are fetched by ``inspect.getmembers``; defaults to ``inspect.isfunction``.
    """
    # A Builder subclass may be passed directly as the return type; wrap it
    # into a constant predicate.
    _rtp = return_type_predicate
    if inspect.isclass(_rtp) and issubclass(_rtp, Builder):
        return_type_predicate = lambda x: _rtp
    module_name = module_alias if module_alias else module.__name__ + '.'
    members = _get_patch_members(
        module,
        blacklist_predicate=blacklist_predicate,
        whitelist_predicate=whitelist_predicate,
        getmembers_predicate=getmembers_predicate,
        admit_private=admit_private,
    )
    for name, f in members:
        if method_wrapper:
            g, wrapped = method_wrapper(f), f
        else:
            g, wrapped = f, None
        cls.RegisterAt(n, g, module_name, wrapped=wrapped,
                       _return_type=return_type_predicate(name),
                       alias=method_name_modifier(name),
                       explanation=explanation)
|
def set_state(self, state):
    """Call the set_state method in SimStatePlugin class, and then perform
    the delayed initialization.

    :param state: The SimState instance
    """
    SimStatePlugin.set_state(self, state)
    # Delayed initialization: inherit region maps from the parent if present.
    stack_map = self._temp_stack_region_map
    generic_map = self._temp_generic_region_map
    if stack_map or generic_map:
        # Inherited from its parent
        self._stack_region_map = stack_map.copy()
        self._generic_region_map = generic_map.copy()
    elif not self._abstract_backer and o.REGION_MAPPING in self.state.options:
        # Only the top-level SimMemory instance can have region maps.
        self._stack_region_map = RegionMap(True)
        self._generic_region_map = RegionMap(False)
    else:
        self._stack_region_map = None
        self._generic_region_map = None
|
def submission_filenames(round_num=None, tournament=None):
    """Get filenames of your submissions."""
    filenames = napi.get_submission_filenames(tournament, round_num)
    click.echo(prettify(filenames))
|
def default_reverse_key_func(full_key):
    """Reverse of Django's default_key_func, i.e. undoing:

        def default_key_func(key, key_prefix, version):
            return '%s:%s:%s' % (key_prefix, version, key)
    """
    m = reverse_key_re.match(full_key)
    # Return (key, key_prefix, version).
    return m.group(3), m.group(1), int(m.group(2))
|
def delete(self):
    """Delete this record without deleting any dependent or child records.

    This can orphan records, so use with care.
    """
    if not self.id:
        # Unsaved record: nothing to remove.
        return
    with Repo.db:
        Repo(self.__table).where(id=self.id).delete()
|
def parse_config(main_section, *filenames):
    """parse config files"""
    filename = os.path.abspath(filenames[-1])
    here = os.path.dirname(filename)
    defaults = {'here': here, 'hash': '#', '#': '#'}
    config = configparser.ConfigParser(
        defaults, allow_no_value=False,
        interpolation=configparser.ExtendedInterpolation(),
    )
    config.optionxform = str
    config.read([os.path.expanduser('~/.irc3/passwd.ini')] + list(filenames))

    def coerce(raw):
        # Strings are promoted to lists, ints, floats or booleans when
        # they look like one.
        if '\n' in raw:
            return as_list(raw)
        if raw.isdigit():
            return int(raw)
        if raw.replace('.', '').isdigit() and raw.count('.') == 1:
            return float(raw)
        if raw in ('true', 'false'):
            return raw == 'true'
        return raw

    value = {}
    for section in config.sections():
        items = {k: coerce(v) for k, v in config.items(section)}
        if section == main_section:
            # Options of the main section live at the top level.
            value.update(items)
        else:
            # Interpolation helpers are not real options; drop them.
            for k in ('here', 'config'):
                items.pop(k, '')
            value[section] = items
    value.update(defaults)
    value['configfiles'] = filenames
    return value
|
def offer_url(self):
    """Offer URL

    :return:
        Offer URL (string).
    """
    base = AMAZON_ASSOCIATES_BASE_URL.format(domain=DOMAINS[self.region])
    return "{0}{1}/?tag={2}".format(base, self.asin, self.aws_associate_tag)
|
def __register(self, operator):
    """Registers the given logical operator to the environment and
    connects it to its upstream operator (if any).

    A call to this function adds a new edge to the logical topology.

    Attributes:
        operator (Operator): The metadata of the logical operator.
    """
    self.env.operators[operator.id] = operator
    self.dst_operator_id = operator.id
    logger.debug("Adding new dataflow edge ({},{}) --> ({},{})".format(
        self.src_operator_id,
        self.env.operators[self.src_operator_id].name,
        self.dst_operator_id,
        self.env.operators[self.dst_operator_id].name))
    # Update logical dataflow graphs
    self.env._add_edge(self.src_operator_id, self.dst_operator_id)
    # Keep track of the partitioning strategy and the destination operator
    src_operator = self.env.operators[self.src_operator_id]
    if self.is_partitioned is True:
        # A strategy was already attached to this stream; re-register it now
        # that the destination operator is known.
        partitioning, _ = src_operator._get_partition_strategy(self.id)
        src_operator._set_partition_strategy(_generate_uuid(), partitioning, operator.id)
    elif src_operator.type == OpType.KeyBy:
        # Set the output partitioning strategy to shuffle by key
        partitioning = PScheme(PStrategy.ShuffleByKey)
        src_operator._set_partition_strategy(_generate_uuid(), partitioning, operator.id)
    else:
        # No partitioning strategy has been defined - set default (forward)
        partitioning = PScheme(PStrategy.Forward)
        src_operator._set_partition_strategy(_generate_uuid(), partitioning, operator.id)
    return self.__expand()
|
def line_iterator(readable_file, size=None):
    # type: (IO[bytes], Optional[int]) -> Iterator[bytes]
    """Iterate over the lines of a binary file.

    Implementation reads each char individually, which is not very
    efficient.

    Args:
        readable_file: file-like object opened in binary mode.
        size: maximum number of bytes to read; ``None`` or a negative
            value means read until EOF.

    Yields:
        bytes: a single line in the file (including its terminator,
        except possibly for the final partial line).
    """
    read = readable_file.read
    line = []
    unbounded = size is None or size < 0
    while True:
        if not unbounded and size <= 0:
            break
        byte = read(1)
        if not byte:
            # EOF reached.
            break
        if not unbounded:
            size -= len(byte)
        line.append(byte)
        # Yield on newline; in bounded mode also flush once the byte budget
        # is exhausted. (The original tested ``byte in b"\n"``, which is
        # also True for the empty EOF read and produced a spurious trailing
        # empty line when the input ended with a newline.)
        if byte == b"\n" or (not unbounded and size <= 0):
            yield b"".join(line)
            del line[:]
    if line:
        # Final partial line without a trailing newline.
        yield b"".join(line)
|
def dim(self):
    """NAME:
       dim
    PURPOSE:
       return the dimension of the Orbit
    INPUT:
       (none)
    OUTPUT:
       dimension
    HISTORY:
       2011-02-03 - Written - Bovy (NYU)
    """
    # Phase-space vector length -> orbit dimension:
    # 2 -> 1D, 3 or 4 -> 2D, 5 or 6 -> 3D.
    return {2: 1, 3: 2, 4: 2, 5: 3, 6: 3}.get(len(self._orb.vxvv))
|
def crypto_secretstream_xchacha20poly1305_keygen():
    """Generate a key for use with
    :func:`.crypto_secretstream_xchacha20poly1305_init_push`.
    """
    buf = ffi.new(
        "unsigned char[]",
        crypto_secretstream_xchacha20poly1305_KEYBYTES,
    )
    lib.crypto_secretstream_xchacha20poly1305_keygen(buf)
    # Copy the C buffer out into an immutable Python bytes object.
    return ffi.buffer(buf)[:]
|
def visitIgnoreDirective(self, ctx: jsgParser.IgnoreDirectiveContext):
    """directive: '.IGNORE' name* SEMI"""
    directives = self._context.directives
    for name in as_tokens(ctx.name()):
        directives.append('_CONTEXT.IGNORE.append("{}")'.format(name))
|
def get_all(self, search_filter=None):
    """Fetch all data from backend."""
    items = self.backend.get_all()
    if not items:
        # Preserve the v1 envelope shape even when empty.
        return {self.namespace: []} if self.version == 1 else []
    if search_filter:
        # Narrow down the result set with a JMESPath expression.
        items = jmespath.search(search_filter, items)
    return items
|
def prepare_raise(func):
    """Just a short decorator which shrinks
    full ``raise (E, V, T)`` form into proper ``raise E(V), T``.
    """
    @functools.wraps(func)
    def decorator(type_, value=None, traceback=None):
        type_is_instance = isinstance(type_, Exception)
        if value is not None and type_is_instance:
            raise TypeError("instance exception may not have a separate value")
        if value is not None:
            error = type_(value)
        elif type_is_instance:
            # Already an exception instance; use it as-is.
            error = type_
        else:
            error = type_()
        func(error, value, traceback)
    return decorator
|
def RemoveObject(self, path):
    '''Remove a D-Bus object from the mock

    As with AddObject, this will *not* emit the InterfacesRemoved signal if
    it’s an ObjectManager instance.
    '''
    if path not in objects:
        raise dbus.exceptions.DBusException(
            'object %s does not exist' % path,
            name='org.freedesktop.DBus.Mock.NameError')
    objects[path].remove_from_connection()
    del objects[path]
|
async def renew(self, session, *, dc=None):
    """Renews a TTL-based session.

    Parameters:
        session (ObjectID): Session ID
        dc (str): Specify datacenter that will be used.
            Defaults to the agent's local datacenter.
    Returns:
        ObjectMeta: where value is session
    Raises:
        NotFound: session is absent

    .. note:: Consul MAY return a TTL value higher than the one
              specified during session creation. This indicates
              the server is under high load and is requesting
              clients renew less often.
    """
    session_id = extract_attr(session, keys=["ID"])
    response = await self._api.put("/v1/session/renew",
                                   session_id,
                                   params={"dc": dc})
    try:
        first = response.body[0]
    except IndexError:
        # Empty body means the session does not exist (anymore).
        raise NotFound("No session for %r" % session_id,
                       meta=extract_meta(response.headers))
    return consul(first, meta=extract_meta(response.headers))
|
def update_vault(self, vault_form):
    """Updates an existing vault.

    arg:    vault_form (osid.authorization.VaultForm): the form
            containing the elements to be updated
    raise:  IllegalState - ``vault_form`` already used in an update
            transaction
    raise:  InvalidArgument - the form contains an invalid value
    raise:  NullArgument - ``vault_form`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    raise:  Unsupported - ``vault_form`` did not originate from
            ``get_vault_form_for_update()``
    *compliance: mandatory -- This method must be implemented.*
    """
    # Implemented from template for
    # osid.resource.BinAdminSession.update_bin_template
    if self._catalog_session is not None:
        # Delegate to the underlying catalog session when one is configured.
        return self._catalog_session.update_catalog(catalog_form=vault_form)
    collection = JSONClientValidated('authorization',
                                     collection='Vault',
                                     runtime=self._runtime)
    # Validate the form's type and that it was issued for an update.
    if not isinstance(vault_form, ABCVaultForm):
        raise errors.InvalidArgument('argument type is not an VaultForm')
    if not vault_form.is_for_update():
        raise errors.InvalidArgument('the VaultForm is for update only, not create')
    # Each form may be used at most once; self._forms tracks issued forms,
    # so a missing key means the form came from a different session.
    try:
        if self._forms[vault_form.get_id().get_identifier()] == UPDATED:
            raise errors.IllegalState('vault_form already used in an update transaction')
    except KeyError:
        raise errors.Unsupported('vault_form did not originate from this session')
    if not vault_form.is_valid():
        raise errors.InvalidArgument('one or more of the form elements is invalid')
    collection.save(vault_form._my_map)  # save is deprecated - change to replace_one
    self._forms[vault_form.get_id().get_identifier()] = UPDATED
    # Note: this is out of spec. The OSIDs don't require an object to be returned
    return objects.Vault(osid_object_map=vault_form._my_map,
                         runtime=self._runtime,
                         proxy=self._proxy)
|
def delete_post(apikey, post_id, username, password, publish):
    """blogger.deletePost(api_key, post_id, username, password, 'publish')
    => boolean
    """
    user = authenticate(username, password, 'zinnia.delete_entry')
    # Only entries authored by the authenticated user can be removed.
    Entry.objects.get(id=post_id, authors=user).delete()
    return True
|
def get_arguments(options):
    """This function handles and validates the wrapper arguments.

    Args:
        options: either a string (one option definition per line, where the
            2nd and 3rd whitespace-separated fields are destination and
            flag), or an iterable of ``(flag, dest, default, help)`` rows.

    Returns:
        argparse.Namespace: the parsed command-line arguments.
    """
    # These the next couple of lines defines the header of the Help output
    parser = ArgumentParser(
        formatter_class=RawDescriptionHelpFormatter,
        usage=("""%(prog)s
--------------------------------------------------------------------------------
"""),
        description=("""
Service Wrapper
===============
This is the service wrapper script, which is a part of the CGE services.
Read the online manual for help.
A list of all published services can be found at:
cge.cbs.dtu.dk/services
"""),
        epilog=("""
--------------------------------------------------------------------------------
"""))
    # ADDING ARGUMENTS
    setarg = parser.add_argument
    # SERVICE SPECIFIC ARGUMENTS
    if isinstance(options, str):
        # Extract (dest, flag) pairs from the option description text.
        options = [[x for i, x in enumerate(line.split()) if i in [1, 2]]
                   for line in options.split('\n') if len(line) > 0]
        for o in options:
            try:
                setarg(o[1], type=str, dest=o[0], default=None, help=SUPPRESS)
            except Exception:
                # Skip malformed or duplicate option definitions.
                # (Was a bare ``except:`` with a no-op ``None`` statement,
                # which also swallowed SystemExit and KeyboardInterrupt.)
                pass
    else:
        for o in options:
            if o[2] is True:
                # Handle negative flags
                setarg(o[0], action="store_false", dest=o[1], default=o[2], help=o[3])
            elif o[2] is False:
                # Handle positive flags
                setarg(o[0], action="store_true", dest=o[1], default=o[2], help=o[3])
            else:
                help_ = o[3] if o[2] is None else "%s [%s]" % (o[3], '%(default)s')
                setarg(o[0], type=str, dest=o[1], default=o[2], help=help_)
    # VALIDATION OF ARGUMENTS
    args = parser.parse_args()
    debug.log("ARGS: %s" % args)
    return args
|
def pre_filter(self):
    """Return rTorrent condition to speed up data transfer.

    Builds a server-side pre-filter expression for this field so rTorrent
    only transfers items that can possibly match; returns an empty string
    when no safe pre-filter can be constructed.
    """
    # Only some fields support pre-filtering, and templated values are
    # resolved client-side, so no server-side condition is possible there.
    if self._name not in self.PRE_FILTER_FIELDS or self._template:
        return ''
    if not self._value:
        # Empty value: match items where the field itself is empty.
        return '"equal={},cat="'.format(self.PRE_FILTER_FIELDS[self._name])
    if self._is_regex:
        # Strip the surrounding '/' delimiters, then split on regex
        # metacharacters to obtain literal fragments.
        needle = self._value[1:-1]
        needle = self.CLEAN_PRE_VAL_RE.sub(' ', needle)
        needle = self.SPLIT_PRE_VAL_RE.split(needle)
    else:
        # Plain value: clean, then split on glob metacharacters.
        needle = self.CLEAN_PRE_VAL_RE.sub(' ', self._value)
        needle = self.SPLIT_PRE_GLOB_RE.split(needle)
    # Use the longest literal fragment for the server-side substring test.
    needle = list(sorted(needle, key=len))[-1]
    if needle:
        try:
            needle.encode('ascii')
        except UnicodeEncodeError:
            # Non-ASCII needles cannot be safely embedded; skip pre-filtering.
            return ''
        else:
            return r'"string.contains_i=${},\"{}\""'.format(
                self.PRE_FILTER_FIELDS[self._name], needle.replace('"', r'\\\"'))
    return ''
|
def retrieve_config(self, value, default):
    """Retrieves a value (with a certain fallback) from the config files
    (looks first into config_filename_global then into
    config_filename_user; the latest takes preeminence). If the command
    line flag for the value is used, that overrides everything else.
    """
    # Command-line flags take precedence over anything in the config files.
    try:
        override = self.args[value]
    except KeyError:
        override = None
    if override:
        return override
    # Fall back to this tool's own section if present, else the defaults.
    if self.config.has_section(self.name):
        section = self.name
    else:
        section = self.config.default_section
    return self.config.get(section, value, fallback=default)
|
def astensor(array: TensorLike) -> BKTensor:
    """Convert to product tensor"""
    tensor = tf.convert_to_tensor(array, dtype=CTYPE)
    if DEVICE == 'gpu':  # pragma: no cover
        tensor = tensor.gpu()
    # size = np.prod(np.array(tensor.get_shape().as_list()))
    # Reshape into a rank-N tensor with one axis of length 2 per qubit.
    N = int(math.log2(size(tensor)))
    return tf.reshape(tensor, ([2] * N))
|
def success(self, **kwargs):
    """Returns all arguments received in init and this method call."""
    response = dict(success=True)
    response.update(kwargs)
    response.update(self.kwargs)
    # check dates can be manipulated
    response['test_argument3'] = response['test_argument3'] + datetime.timedelta(days=1)
    return response
|
def fix_cufflinks_attributes(ref_gtf, merged_gtf, data, out_file=None):
    """replace the cufflinks gene_id and transcript_id with the
    gene_id and transcript_id from ref_gtf, where available

    ref_gtf: reference GTF whose gene/transcript ids are authoritative
    merged_gtf: cufflinks-merged GTF to be rewritten
    data: passed through to file_transaction for transactional temp handling
    out_file: optional explicit output path; defaults to
              "<merged_gtf base>.clean.fixed<ext>"
    Returns the path of the fixed GTF file.
    """
    base, ext = os.path.splitext(merged_gtf)
    fixed = out_file if out_file else base + ".clean.fixed" + ext
    # Idempotent: reuse an existing output rather than regenerating it.
    if file_exists(fixed):
        return fixed
    ref_db = gtf.get_gtf_db(ref_gtf)
    merged_db = gtf.get_gtf_db(merged_gtf, in_memory=True)
    # Pass 1: reference transcript id -> reference gene id.
    ref_tid_to_gid = {}
    for gene in ref_db.features_of_type('gene'):
        for transcript in ref_db.children(gene, level=1):
            ref_tid_to_gid[transcript.id] = gene.id
    # Pass 2 over the merged file:
    #   ctid_to_cgid: cufflinks transcript id -> cufflinks gene id
    #   ctid_to_oid:  cufflinks transcript id -> original id ("oId"), read
    #                 from the first child feature of each transcript.
    ctid_to_cgid = {}
    ctid_to_oid = {}
    for gene in merged_db.features_of_type('gene'):
        for transcript in merged_db.children(gene, level=1):
            ctid_to_cgid[transcript.id] = gene.id
            feature = list(merged_db.children(transcript))[0]
            oid = feature.attributes.get("oId", [None])[0]
            if oid:
                ctid_to_oid[transcript.id] = oid
    # Pass 3: cufflinks gene id -> reference gene id, chained through oId.
    cgid_to_gid = {}
    for ctid, oid in ctid_to_oid.items():
        cgid = ctid_to_cgid.get(ctid, None)
        # NOTE(review): redundant re-lookup -- oid is already the loop value.
        oid = ctid_to_oid.get(ctid, None)
        gid = ref_tid_to_gid.get(oid, None) if oid else None
        if cgid and gid:
            cgid_to_gid[cgid] = gid
    # Pass 4: rewrite every feature, substituting reference ids where a
    # mapping exists and stripping cufflinks bookkeeping attributes.
    with file_transaction(data, fixed) as tmp_fixed_file:
        with open(tmp_fixed_file, "w") as out_handle:
            for gene in merged_db.features_of_type('gene'):
                for transcript in merged_db.children(gene, level=1):
                    for feature in merged_db.children(transcript):
                        cgid = feature.attributes.get("gene_id", [None])[0]
                        gid = cgid_to_gid.get(cgid, None)
                        ctid = None
                        if gid:
                            feature.attributes["gene_id"][0] = gid
                            ctid = feature.attributes.get("transcript_id", [None])[0]
                        # transcript_id is only swapped when the gene id mapped
                        # (ctid stays None otherwise, so the get() misses).
                        tid = ctid_to_oid.get(ctid, None)
                        if tid:
                            feature.attributes["transcript_id"][0] = tid
                        if "nearest_ref" in feature.attributes:
                            del feature.attributes["nearest_ref"]
                        if "oId" in feature.attributes:
                            del feature.attributes["oId"]
                        out_handle.write(str(feature) + "\n")
    return fixed
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.