signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
|---|---|
def chords(chord_labels, intervals, fs, **kwargs):
    """Synthesize a sequence of chord labels as audio.

    Parameters
    ----------
    chord_labels : list of str
        List of chord label strings.
    intervals : np.ndarray, shape=(len(chord_labels), 2)
        Start and end times of each chord label.
    fs : int
        Sampling rate to synthesize at.
    kwargs
        Additional keyword arguments to pass to
        :func:`mir_eval.sonify.time_frequency`.

    Returns
    -------
    output : np.ndarray
        Synthesized chord labels.
    """
    util.validate_intervals(intervals)
    # Encode labels, then rotate each interval bitmap to its root pitch
    # class to obtain a chroma representation.
    roots, interval_bitmaps, _ = chord.encode_many(chord_labels)
    chromagram = np.array(
        [np.roll(bitmap, root)
         for bitmap, root in zip(interval_bitmaps, roots)]).T
    return chroma(chromagram, intervals, fs, **kwargs)
|
def readline(self, timeout=None):  # timeout is not in use
    """Read one line from the port and strip escape characters.

    :param timeout: Unused; kept for interface compatibility.
    :return: Stripped line.
    """
    # Close the makefile() wrapper when done so its buffer and file
    # descriptor are released (the original leaked it); closing the
    # wrapper does not close the underlying port object itself.
    fil = self.port.makefile()
    try:
        line = fil.readline()
    finally:
        fil.close()
    return strip_escape(line.strip())
|
def adjust_short_series(self, timegrid, values):
    """Adjust a short time series to the (longer) initialization timegrid.

    Normally, time series data read from external data files should span
    (at least) the whole initialization time period of a HydPy project.
    However, for some variables which are only used for comparison
    (e.g. observed runoff used for calibration), incomplete time series
    might also be helpful.  This method is thought for adjusting such
    incomplete series to the public initialization time grid stored in
    module |pub|.  It is automatically called in method
    |IOSequence.adjust_series| when necessary, provided that the option
    |Options.checkseries| is disabled.

    The returned array always has the shape of the initialization
    period.  Time steps covered by `timegrid` are filled with the
    corresponding entries of `values`; steps outside `timegrid` are
    filled with the default given by |IOSequence.initinfo| (`nan`, or
    zero when option |Options.usedefaultvalues| is enabled).
    """
    init = hydpy.pub.timegrids.init
    # Positions of the initialization period's first and last date
    # relative to the (possibly shorter or shifted) data timegrid.
    idxs = (timegrid[init.firstdate], timegrid[init.lastdate])
    source = values
    values = numpy.full(self.seriesshape, self.initinfo[0])
    nmb = len(source)
    # Clamp both positions into the valid index range of the data.
    jdxs = [min(max(idx, 0), nmb) for idx in idxs]
    # Copy the overlapping section into the right slot of the result.
    zdx1 = max(-idxs[0], 0)
    zdx2 = zdx1 + jdxs[1] - jdxs[0]
    values[zdx1:zdx2] = source[jdxs[0]:jdxs[1]]
    return values
|
def version_option(version=None, *param_decls, **attrs):
    """Add a ``--version`` option which immediately ends the program
    printing out the version number.  This is implemented as an eager
    option that prints the version and exits the program in the callback.

    :param version: the version number to show.  If not provided Click
                    attempts an auto discovery via setuptools.
    :param prog_name: the name of the program (defaults to autodetection)
    :param message: custom message to show instead of the default
                    (``'%(prog)s, version %(version)s'``)
    :param others: everything else is forwarded to :func:`option`.
    """
    if version is None:
        # Remember the caller's module so it can be matched against
        # setuptools console_scripts entry points for auto discovery.
        module = sys._getframe(1).f_globals.get('__name__')

    def decorator(f):
        prog_name = attrs.pop('prog_name', None)
        message = attrs.pop('message', '%(prog)s, version %(version)s')

        def callback(ctx, param, value):
            if not value or ctx.resilient_parsing:
                return
            prog = prog_name if prog_name is not None else ctx.find_root().info_name
            ver = version
            if ver is None:
                try:
                    import pkg_resources
                except ImportError:
                    pass
                else:
                    for dist in pkg_resources.working_set:
                        scripts = dist.get_entry_map().get('console_scripts') or {}
                        for name, entry_point in iteritems(scripts):
                            if entry_point.module_name == module:
                                ver = dist.version
                                break
                if ver is None:
                    raise RuntimeError('Could not determine version')
            echo(message % {'prog': prog, 'version': ver}, color=ctx.color)
            ctx.exit()

        attrs.setdefault('is_flag', True)
        attrs.setdefault('expose_value', False)
        attrs.setdefault('is_eager', True)
        attrs.setdefault('help', 'Show the version and exit.')
        attrs['callback'] = callback
        return option(*(param_decls or ('--version',)), **attrs)(f)

    return decorator
|
def cli(ctx, list, fpga):
    """Manage FPGA boards."""
    # Guard-clause dispatch: exactly one of the listing modes runs,
    # otherwise fall back to printing the command help.
    if list:
        Resources().list_boards()
        return
    if fpga:
        Resources().list_fpgas()
        return
    click.secho(ctx.get_help())
|
def delete(method, hmc, uri, uri_parms, logon_required):
    """Operation: Delete HBA (requires DPM mode)."""
    try:
        hba = hmc.lookup_by_uri(uri)
    except KeyError:
        raise InvalidResourceError(method, uri)
    owning_partition = hba.manager.parent
    owning_cpc = owning_partition.manager.parent
    assert owning_cpc.dpm_enabled
    check_valid_cpc_status(method, uri, owning_cpc)
    # Deleting an HBA is not permitted while the partition is in a
    # transitional state.
    check_partition_status(method, uri, owning_partition,
                           invalid_statuses=['starting', 'stopping'])
    owning_partition.hbas.remove(hba.oid)
|
def export_schema_to_dict(back_references):
    """Export the supported import/export schema to a dictionary."""
    database_schemas = [Database.export_schema(
        recursive=True, include_parent_ref=back_references)]
    cluster_schemas = [DruidCluster.export_schema(
        recursive=True, include_parent_ref=back_references)]
    data = {}
    if database_schemas:
        data[DATABASES_KEY] = database_schemas
    if cluster_schemas:
        data[DRUID_CLUSTERS_KEY] = cluster_schemas
    return data
|
def make_stanza(self):
    """Create and return a presence stanza with the current settings.

    :return: Presence stanza
    :rtype: :class:`aioxmpp.Presence`
    """
    presence = aioxmpp.Presence()
    # Apply the tracked availability state, then merge in status texts.
    self._state.apply_to_stanza(presence)
    presence.status.update(self._status)
    return presence
|
def get_genus_type_metadata(self):
    """Override get_genus_type_metadata of the extended object."""
    # Copy the form's metadata and force the genus type to be read-only.
    metadata = dict(self.my_osid_object_form._genus_type_metadata)
    metadata['read_only'] = True
    return Metadata(**metadata)
|
def start(self):
    '''Start a server on the port provided in the :class:`Server`
    constructor, in a separate daemon thread.

    :rtype: Server
    :returns: server instance for chaining
    '''
    handler_cls = _create_handler_class(self._rules, self._always_rules)
    self._handler = handler_cls
    self._server = HTTPServer(('', self._port), self._handler)
    # Daemon thread so the server does not keep the process alive.
    self._thread = Thread(target=self._server.serve_forever, daemon=True)
    self._thread.start()
    self.running = True
    return self
|
def _translate_args(args, desired_locale=None):
    """Translate all the translatable elements of the given arguments object.

    This is used for translating the translatable values in method
    arguments, which include values of tuples or dictionaries.  If the
    object is not a tuple or a dictionary, the object itself is translated
    if it is translatable.  If the locale is None the object is translated
    to the system locale.

    :param args: the args to translate
    :param desired_locale: the locale to translate the args to; if None
        the default system locale will be used
    :returns: a new args object with the translated contents of the original
    """
    if isinstance(args, tuple):
        return tuple(translate(item, desired_locale) for item in args)
    if isinstance(args, dict):
        return {key: translate(value, desired_locale)
                for key, value in six.iteritems(args)}
    return translate(args, desired_locale)
|
def parse(directive):
    """Given a string in the format `scope:directive`, or simply `scope`
    or `directive`, return a list of Placement objects suitable for
    passing back over the websocket API (or None for an empty directive).
    """
    if not directive:
        # Handle the null case.
        return None
    if isinstance(directive, (list, tuple)):
        # Recursively parse each member and flatten the results.
        placements = []
        for entry in directive:
            placements.extend(parse(entry))
        return placements
    if isinstance(directive, (dict, client.Placement)):
        # Already something we can hand straight back to the api
        # (forwards compatibility).
        return [directive]
    # Juju 2.0 can't handle lxc containers.
    directive = directive.replace('lxc', 'lxd')
    if ":" in directive:
        # Planner has given us a scope and directive in string form.
        scope, directive = directive.split(":")
        return [client.Placement(scope=scope, directive=directive)]
    if directive.isdigit():
        # A bare machine id (juju core verifies its validity).
        return [client.Placement(scope=MACHINE_SCOPE, directive=directive)]
    if "/" in directive:
        # e.g. "0/lxd/0"
        # https://github.com/juju/juju/blob/master/instance/placement_test.go#L29
        return [client.Placement(scope=MACHINE_SCOPE, directive=directive)]
    # Probably a container type; juju core verifies validity.
    return [client.Placement(scope=directive)]
|
def find_package_indexes_in_dir(self, simple_dir):
    """Given a directory that contains simple package indexes, return
    a sorted list of normalized package names.  This presumes every
    directory within is a simple package index directory.

    :param simple_dir: path of the simple/ directory to scan.
    :return: sorted list of canonicalized package names.
    """
    # Package indexes must be in directories, so ignore anything else.
    # NOTE: the directory filter must run on the *on-disk* names before
    # canonicalizing: a canonicalized name (e.g. "foo-bar" for "Foo_Bar")
    # may not exist on disk, so filtering after canonicalization would
    # wrongly drop valid package indexes.
    return sorted({
        canonicalize_name(entry)
        for entry in os.listdir(simple_dir)
        if os.path.isdir(os.path.join(simple_dir, entry))
    })
|
def serialize(self, include_class=True, save_dynamic=False, **kwargs):
    """Serialize Singleton instance to a dictionary.

    This behaves identically to HasProperties.serialize, except it also
    saves the identifying name in the dictionary as well.
    """
    serialized = super(Singleton, self).serialize(
        include_class=include_class,
        save_dynamic=save_dynamic,
        **kwargs)
    serialized['_singleton_id'] = self._singleton_id
    return serialized
|
def add_app_template_global(self, func: Callable, name: Optional[str] = None) -> None:
    """Add an application wide template global.

    This is designed to be used on the blueprint directly, and has the
    same arguments as :meth:`~quart.Quart.add_template_global`.  An
    example usage,

    .. code-block:: python

        def my_global():
            ...

        blueprint = Blueprint(__name__)
        blueprint.add_app_template_global(my_global)
    """
    # Defer registration until the blueprint is bound to an app.
    def register(state):
        state.register_template_global(func, name)

    self.record_once(register)
|
def encrypt(self, message, sessionkey=None, **prefs):
    """Encrypt a PGPMessage using this key.

    :param message: The message to encrypt.
    :type message: :py:obj:`PGPMessage`
    :optional param sessionkey: Session key to use when encrypting.
        Default is ``None``; if ``None``, a session key of the appropriate
        length is generated randomly.

        .. warning::
            Care should be taken when making use of this option!  Session
            keys *absolutely need* to be unpredictable!  Use the
            ``gen_key()`` method on the desired
            :py:obj:`~constants.SymmetricKeyAlgorithm` to generate one!
    :type sessionkey: ``bytes``, ``str``
    :raises: :py:exc:`~errors.PGPEncryptionError` if encryption failed
        for any reason.
    :returns: A new :py:obj:`PGPMessage` with the encrypted contents of
        ``message``.

    The following optional keyword arguments can be used with
    :py:meth:`PGPKey.encrypt`:

    :keyword cipher: The symmetric block cipher to use when encrypting
        the message.
    :type cipher: :py:obj:`~constants.SymmetricKeyAlgorithm`
    :keyword user: User ID to use as the recipient for this encryption
        operation, for the purposes of preference defaults and selection
        validation.
    :type user: ``str``, ``unicode``
    """
    user = prefs.pop('user', None)
    if user is not None:
        uid = self.get_uid(user)
    else:
        # No explicit recipient: fall back to this key's first User ID,
        # then to the parent key's first User ID.
        uid = next(iter(self.userids), None)
        if uid is None and self.parent is not None:
            uid = next(iter(self.parent.userids), None)
    cipher_algo = prefs.pop('cipher', uid.selfsig.cipherprefs[0])
    if cipher_algo not in uid.selfsig.cipherprefs:
        warnings.warn("Selected symmetric algorithm not in key preferences", stacklevel=3)
    if message.is_compressed and message._compression not in uid.selfsig.compprefs:
        warnings.warn("Selected compression algorithm not in key preferences", stacklevel=3)
    if sessionkey is None:
        sessionkey = cipher_algo.gen_key()
    # Set up a new PKESessionKeyV3 addressed to this key.
    pkesk = PKESessionKeyV3()
    pkesk.encrypter = bytearray(binascii.unhexlify(self.fingerprint.keyid.encode('latin-1')))
    pkesk.pkalg = self.key_algorithm
    # pkesk.encrypt_sk(self.__key__, cipher_algo, sessionkey)
    pkesk.encrypt_sk(self._key, cipher_algo, sessionkey)
    if message.is_encrypted:  # pragma: no cover
        _m = message
    else:
        _m = PGPMessage()
        skedata = IntegrityProtectedSKEDataV1()
        skedata.encrypt(sessionkey, cipher_algo, message.__bytes__())
        _m |= skedata
    _m |= pkesk
    return _m
|
def create_output(out, shape, dtype, mode='w+', suffix=None):
    """Return numpy array where image data of shape and dtype can be copied.

    The 'out' parameter may have the following values or types:

    None
        An empty array of shape and dtype is created and returned.
    numpy.ndarray
        An existing writable array of compatible dtype and shape.  A view
        of the same array is returned after verification.
    'memmap' or 'memmap:tempdir'
        A memory-map to an array stored in a temporary binary file on
        disk is created and returned.
    str or open file
        The file name or file object used to create a memory-map to an
        array stored in a binary file on disk.  The created memory-mapped
        array is returned.
    """
    if out is None:
        return numpy.zeros(shape, dtype)
    if isinstance(out, str) and out.startswith('memmap'):
        import tempfile  # noqa: delay import
        # Anything after 'memmap:' names the temporary directory.
        tempdir = out[7:] if len(out) > 7 else None
        if suffix is None:
            suffix = '.memmap'
        with tempfile.NamedTemporaryFile(dir=tempdir, suffix=suffix) as fh:
            return numpy.memmap(fh, shape=shape, dtype=dtype, mode=mode)
    if isinstance(out, numpy.ndarray):
        if product(shape) != product(out.shape):
            raise ValueError('incompatible output shape')
        if not numpy.can_cast(dtype, out.dtype):
            raise ValueError('incompatible output dtype')
        return out.reshape(shape)
    if isinstance(out, pathlib.Path):
        out = str(out)
    return numpy.memmap(out, shape=shape, dtype=dtype, mode=mode)
|
def make_sources(comp_key, comp_dict):
    """Make a dictionary mapping component keys to a source or set of sources.

    Parameters
    ----------
    comp_key : str
        Key used to access sources.
    comp_dict : dict
        Information used to build sources.

    Returns
    -------
    `OrderedDict` mapping comp_key to `fermipy.roi_model.Source`.
    """
    srcdict = OrderedDict()
    # comp_dict may either carry the info on an attribute or be the info.
    comp_info = getattr(comp_dict, 'info', comp_dict)
    spectrum = getattr(comp_dict, 'spectrum', None)
    model_type = comp_info.model_type
    if model_type == 'PointSource':
        srcdict[comp_key] = make_point_source(
            comp_info.source_name, comp_info.src_dict)
    elif model_type == 'SpatialMap':
        srcdict[comp_key] = make_spatialmap_source(
            comp_info.source_name, comp_info.Spatial_Filename, spectrum)
    elif model_type == 'MapCubeSource':
        srcdict[comp_key] = make_mapcube_source(
            comp_info.source_name, comp_info.Spatial_Filename, spectrum)
    elif model_type == 'IsoSource':
        srcdict[comp_key] = make_isotropic_source(
            comp_info.source_name, comp_info.Spectral_Filename, spectrum)
    elif model_type == 'CompositeSource':
        srcdict[comp_key] = make_composite_source(
            comp_info.source_name, spectrum)
    elif model_type == 'CatalogSources':
        srcdict.update(make_catalog_sources(
            comp_info.roi_model, comp_info.source_names))
    else:
        raise ValueError("Unrecognized model_type %s" % model_type)
    return srcdict
|
def _init_request_logging(self, app):
    """Set up request logging unless ``APPINSIGHTS_DISABLE_REQUEST_LOGGING``
    is set in the Flask config.

    Args:
        app (flask.Flask). the Flask application for which to initialize
            the extension.
    """
    if app.config.get(CONF_DISABLE_REQUEST_LOGGING, False):
        # Request logging has been explicitly disabled.
        return
    # Wrap the WSGI app so every request is reported to the channel.
    self._requests_middleware = WSGIApplication(
        self._key, app.wsgi_app, telemetry_channel=self._channel)
    app.wsgi_app = self._requests_middleware
|
def _parse_title ( file_path ) :
"""Parse a title from a file name"""
|
title = file_path
title = title . split ( '/' ) [ - 1 ]
title = '.' . join ( title . split ( '.' ) [ : - 1 ] )
title = ' ' . join ( title . split ( '-' ) )
title = ' ' . join ( [ word . capitalize ( ) for word in title . split ( ' ' ) ] )
return title
|
def main_target_usage_requirements(self, specification, project):
    """Return the usage requirements to use when declaring a main target,
    which are obtained by
    - translating all specified property paths, and
    - adding the project's usage requirements.

    specification: Use-properties explicitly specified for a main target
    project: Project where the main target is to be declared
    """
    assert is_iterable_typed(specification, basestring)
    assert isinstance(project, ProjectTarget)
    parent_usage = project.get('usage-requirements')
    # 'refine-from-user-input' is deliberately not used here because:
    # - removing the parent's usage requirements is questionable
    # - refining is unnecessary, since usage requirements are always free
    explicit_usage = property_set.create_from_user_input(
        specification, project.project_module(), project.get("location"))
    return parent_usage.add(explicit_usage)
|
async def _recover_jobs(self, agent_addr):
    """Recover the jobs sent to a crashed agent."""
    # Snapshot the running jobs (reversed) so entries can be deleted
    # from the dict while iterating.
    for (client_addr, job_id), (agent, job_msg, _) in reversed(list(self._job_running.items())):
        if agent != agent_addr:
            continue
        # Report the job as crashed back to the originating client.
        await ZMQUtils.send_with_addr(
            self._client_socket, client_addr,
            BackendJobDone(job_id, ("crash", "Agent restarted"),
                           0.0, {}, {}, {}, "", None, None, None))
        del self._job_running[(client_addr, job_id)]
    await self.update_queue()
|
def draw(self, renderer):
    """Draw the children."""
    # Rescale the dpi transform for the current renderer resolution.
    dpi_scale = renderer.points_to_pixels(1.)
    self.dpi_transform.clear()
    self.dpi_transform.scale(dpi_scale, dpi_scale)
    for child in self._children:
        child.draw(renderer)
    self.stale = False
|
def filesfile_string(self):
    """String with the list of files and prefixes needed to execute ABINIT."""
    lines = [
        self.input_file.path,           # 1) Path of the input file
        self.output_file.path,          # 2) Path of the output file
        self.ddb_filepath,              # 3) Input derivative database e.g. t13.ddb.in
        self.md_filepath,               # 4) Output molecular dynamics e.g. t13.md
        self.gkk_filepath,              # 5) Input elphon matrix elements (GKK file)
        self.outdir.path_join("out"),   # 6) Base name for elphon output files e.g. t13
        self.ddk_filepath,              # 7) File containing ddk filenames for elphon/transport
    ]
    return "\n".join(lines)
|
def raise_401(instance, authenticate, msg=None):
    """Abort the current request with a 401 (Unauthorized) response code.

    If the message is given it's output as an error message in the
    response body (correctly converted to the requested MIME type).
    Outputs the WWW-Authenticate header as given by the authenticate
    parameter.

    :param instance: Resource instance (used to access the response)
    :type instance: :class:`webob.resource.Resource`
    :raises: :class:`webob.exceptions.ResponseException` of status 401
    """
    response = instance.response
    response.status = 401
    response.headers['WWW-Authenticate'] = authenticate
    if msg:
        response.body_raw = {'error': msg}
    raise ResponseException(response)
|
def cscore(args):
    """%prog cscore blastfile > cscoreOut

    See supplementary info for sea anemone genome paper, C-score formula:
    cscore(A, B) = score(A, B) /
         max(best score for A, best score for B)
    A C-score of one is the same as reciprocal best hit (RBH).
    Output file will be 3-column (query, subject, cscore). Use --cutoff to
    select a different cutoff.
    """
    from jcvi.utils.cbook import gene_name

    p = OptionParser(cscore.__doc__)
    p.add_option("--cutoff", default=.9999, type="float",
                 help="Minimum C-score to report [default: %default]")
    p.add_option("--pct", default=False, action="store_true",
                 help="Also include pct as last column [default: %default]")
    p.add_option("--writeblast", default=False, action="store_true",
                 help="Also write filtered blast file [default: %default]")
    p.set_stripnames()
    p.set_outfile()
    opts, args = p.parse_args(args)
    ostrip = opts.strip_names
    writeblast = opts.writeblast
    outfile = opts.outfile
    if len(args) != 1:
        sys.exit(not p.print_help())
    blastfile, = args

    # First pass: record the best score seen for every query and subject.
    logging.debug("Register best scores ..")
    best_score = defaultdict(float)
    for b in Blast(blastfile):
        query, subject = b.query, b.subject
        if ostrip:
            query, subject = gene_name(query), gene_name(subject)
        if b.score > best_score[query]:
            best_score[query] = b.score
        if b.score > best_score[subject]:
            best_score[subject] = b.score

    # Second pass: compute C-scores and keep the best entry per pair.
    pairs = {}
    cutoff = opts.cutoff
    for b in Blast(blastfile):
        query, subject = b.query, b.subject
        if ostrip:
            query, subject = gene_name(query), gene_name(subject)
        s = b.score / max(best_score[query], best_score[subject])
        if s > cutoff:
            pair = (query, subject)
            if pair not in pairs or s > pairs[pair][0]:
                pairs[pair] = (s, b.pctid, b)

    fw = must_open(outfile, "w")
    if writeblast:
        fwb = must_open(outfile + ".filtered.blast", "w")
    pct = opts.pct
    for (query, subject), (s, pctid, b) in sorted(pairs.items()):
        row = [query, subject, "{0:.2f}".format(s)]
        if pct:
            row.append("{0:.1f}".format(pctid))
        print("\t".join(row), file=fw)
        if writeblast:
            print(b, file=fwb)
    fw.close()
    if writeblast:
        fwb.close()
|
def remove_all(self, key):
    """Transactional implementation of :func:`MultiMap.remove_all(key)
    <hazelcast.proxy.multi_map.MultiMap.remove_all>`

    :param key: (object), the key of the entries to remove.
    :return: (list), the collection of the values associated with the key.
    """
    check_not_none(key, "key can't be none")
    key_data = self._to_data(key)
    return self._encode_invoke(transactional_multi_map_remove_codec, key=key_data)
|
def simxReadVisionSensor(clientID, sensorHandle, operationMode):
    '''Please have a look at the function description/documentation in the V-REP user manual'''
    detectionState = ct.c_ubyte()
    auxValues = ct.POINTER(ct.c_float)()
    auxValuesCount = ct.POINTER(ct.c_int)()
    ret = c_ReadVisionSensor(clientID, sensorHandle, ct.byref(detectionState),
                             ct.byref(auxValues), ct.byref(auxValuesCount),
                             operationMode)
    packedAux = []
    if ret == 0:
        # auxValuesCount[0] holds the number of packets; the following
        # entries hold each packet's length within the flat auxValues buffer.
        offset = 0
        for i in range(auxValuesCount[0]):
            count = auxValuesCount[i + 1]
            packedAux.append(auxValues[offset:offset + count])
            offset += count
        # Free the buffers allocated on the C side.
        c_ReleaseBuffer(auxValues)
        c_ReleaseBuffer(auxValuesCount)
    return ret, bool(detectionState.value != 0), packedAux
|
def add_integer_proxy_for(self, label: str, shape: Collection[int] = None) -> Vertex:
    """Create a proxy vertex for the given label and add it to the sequence item."""
    java_label = _VertexLabel(label).unwrap()
    if shape is None:
        java_vertex = self.unwrap().addIntegerProxyFor(java_label)
    else:
        java_vertex = self.unwrap().addIntegerProxyFor(java_label, shape)
    return Vertex._from_java_vertex(java_vertex)
|
def start(self, wait=60, *, server_settings={}, **opts):
    """Start the cluster.

    :param wait: seconds to wait for the server to accept connections.
    :param server_settings: mapping of server configuration parameters,
        passed to the server as ``-c name=value`` arguments.
    :param opts: additional ``--name=value`` command line arguments;
        ``port='dynamic'`` selects a free port automatically.
    :raises ClusterError: if the cluster has not been initialized or the
        server fails to start.
    """
    status = self.get_status()
    if status == 'running':
        return
    elif status == 'not-initialized':
        raise ClusterError(
            'cluster in {!r} has not been initialized'.format(
                self._data_dir))

    # Work on a copy up front: the original code mutated the caller's
    # dict (and the shared mutable default {}) when assigning the unix
    # socket directory below.
    server_settings = server_settings.copy()

    port = opts.pop('port', None)
    if port == 'dynamic':
        port = find_available_port()

    extra_args = ['--{}={}'.format(k, v) for k, v in opts.items()]
    extra_args.append('--port={}'.format(port))

    sockdir = server_settings.get('unix_socket_directories')
    if sockdir is None:
        # Older servers use the singular spelling.
        sockdir = server_settings.get('unix_socket_directory')
    if sockdir is None:
        sockdir = '/tmp'

    ssl_key = server_settings.get('ssl_key_file')
    if ssl_key:
        # Make sure server certificate key file has correct permissions.
        keyfile = os.path.join(self._data_dir, 'srvkey.pem')
        shutil.copy(ssl_key, keyfile)
        os.chmod(keyfile, 0o600)
        server_settings['ssl_key_file'] = keyfile

    if self._pg_version < (9, 3):
        sockdir_opt = 'unix_socket_directory'
    else:
        sockdir_opt = 'unix_socket_directories'
    server_settings[sockdir_opt] = sockdir

    for k, v in server_settings.items():
        extra_args.extend(['-c', '{}={}'.format(k, v)])

    if os.getenv('ASYNCPG_DEBUG_SERVER'):
        stdout = sys.stdout
    else:
        stdout = subprocess.DEVNULL

    if _system == 'Windows':
        # On Windows we have to use pg_ctl as direct execution of the
        # postgres daemon under an Administrative account is not
        # permitted and there is no easy way to drop privileges.
        process = subprocess.run(
            [self._pg_ctl, 'start', '-D', self._data_dir,
             '-o', ' '.join(extra_args)],
            stdout=stdout, stderr=subprocess.STDOUT)
        if process.returncode != 0:
            if process.stderr:
                stderr = ':\n{}'.format(process.stderr.decode())
            else:
                stderr = ''
            raise ClusterError(
                'pg_ctl start exited with status {:d}{}'.format(
                    process.returncode, stderr))
    else:
        self._daemon_process = subprocess.Popen(
            [self._postgres, '-D', self._data_dir, *extra_args],
            stdout=stdout, stderr=subprocess.STDOUT)
        self._daemon_pid = self._daemon_process.pid

    self._test_connection(timeout=wait)
|
def _inherit_outputs(self, pipeline_name, already_defined, resolve_outputs=False):
    """Inherit outputs from a calling Pipeline.

    Args:
        pipeline_name: The Pipeline class name (used for debugging).
        already_defined: Maps output name to stringified db.Key (of
            _SlotRecords) of any existing output slots to be inherited by
            this future.
        resolve_outputs: When True, this method will dereference all output
            slots before returning back to the caller, making those output
            slots' values available.

    Raises:
        UnexpectedPipelineError when resolve_outputs is True and any of the
        output slots could not be retrieved from the Datastore.
    """
    for name, slot_key in already_defined.iteritems():
        if not isinstance(slot_key, db.Key):
            slot_key = db.Key(slot_key)
        existing = self._output_dict.get(name)
        if existing is None:
            if self._strict:
                raise UnexpectedPipelineError(
                    'Inherited output named "%s" must be filled but '
                    'not declared for pipeline class "%s"'
                    % (name, pipeline_name))
            self._output_dict[name] = Slot(name=name, slot_key=slot_key)
        else:
            existing.key = slot_key
            existing._exists = True
    if resolve_outputs:
        # Fetch all slot records in one batch and fill in their values.
        slot_key_dict = dict(
            (s.key, s) for s in self._output_dict.itervalues())
        all_slots = db.get(slot_key_dict.keys())
        for slot, slot_record in zip(slot_key_dict.itervalues(), all_slots):
            if slot_record is None:
                raise UnexpectedPipelineError(
                    'Inherited output named "%s" for pipeline class "%s" is '
                    'missing its Slot in the datastore: "%s"'
                    % (slot.name, pipeline_name, slot.key))
            slot = slot_key_dict[slot_record.key()]
            slot._set_value(slot_record)
|
def _share_project(self, destination, project, to_user, force_send,
                   auth_role='', user_message='', share_users=None):
    """Send message to remote service to email/share project with to_user.

    :param destination: str which type of sharing we are doing
        (SHARE_DESTINATION or DELIVER_DESTINATION)
    :param project: RemoteProject project we are sharing
    :param to_user: RemoteUser user we are sharing with
    :param auth_role: str project role eg 'project_admin'; email is
        customized based on this setting.
    :param user_message: str message to be sent with the share
    :param share_users: [RemoteUser] users to have this project shared
        with after delivery (delivery only)
    :return: the email the user should receive a message on soon
    """
    from_user = self.remote_store.get_current_user()
    share_user_ids = [u.id for u in share_users] if share_users else None
    item = D4S2Item(destination=destination,
                    from_user_id=from_user.id,
                    to_user_id=to_user.id,
                    project_id=project.id,
                    project_name=project.name,
                    auth_role=auth_role,
                    user_message=user_message,
                    share_user_ids=share_user_ids)
    item.send(self.api, force_send)
    return to_user.email
|
def get_code_num_rgb(s: str) -> Optional[Tuple[int, int, int]]:
    """Get rgb code numbers from an RGB escape code.

    :param s: escape-code text with ';'-separated fields, the last ending in 'm'
    :return: (r, g, b) tuple of ints, each in 0-255
    :raises InvalidRgbEscapeCode: if the field count, 'm' terminator, number
        format, or value range is invalid
    """
    parts = s.split(';')
    if len(parts) != 5:
        raise InvalidRgbEscapeCode(s, reason='Count is off.')
    rgbparts = parts[-3:]
    if not rgbparts[2].endswith('m'):
        raise InvalidRgbEscapeCode(s, reason='Missing \'m\' on the end.')
    # Drop exactly the one trailing 'm' we just verified. The previous
    # rstrip('m') stripped *every* trailing 'm' (e.g. '3mm' -> '3'),
    # silently accepting malformed codes.
    rgbparts[2] = rgbparts[2][:-1]
    try:
        r, g, b = (int(x) for x in rgbparts)
    except ValueError as ex:
        raise InvalidRgbEscapeCode(s) from ex
    if not all(in_range(x, 0, 255) for x in (r, g, b)):
        raise InvalidRgbEscapeCode(s, reason='Not in range 0-255.')
    return r, g, b
|
def save(self, *args, **kwargs):
    """Save the Director so derived data stays in sync.

    Rebuilds the display name from company and person, performs the real
    model save, then re-saves every person sitting on this company's board
    so their number-of-board-connections fields are refreshed.
    """
    self.name = "{0} --- {1}".format(self.company.name, self.person)
    # Call the "real" save() method.
    super(Director, self).save(*args, **kwargs)
    for fellow in Director.objects.filter(company=self.company):
        fellow.person.save()
|
def _update_in_hdx(self, object_type, id_field_name, file_to_upload=None, **kwargs):
    # type: (str, str, Optional[str], Any) -> None
    """Helper method to check if HDX object exists in HDX and if so, update it

    Args:
        object_type (str): Description of HDX object type (for messages)
        id_field_name (str): Name of field containing HDX object identifier
        file_to_upload (Optional[str]): File to upload to HDX
        **kwargs: See below
        operation (string): Operation to perform eg. patch. Defaults to update.

    Returns:
        None
    """
    # We load an existing object even though it may well have been loaded already
    # to prevent an admittedly unlikely race condition where someone has updated
    # the object in the intervening time
    self._check_load_existing_object(object_type, id_field_name)
    # Merge local changes into the freshly loaded object and push them to HDX;
    # the optional 'operation' kwarg selects e.g. patch instead of update.
    self._merge_hdx_update(object_type, id_field_name, file_to_upload, **kwargs)
|
def run(self, writer, reader):
    """Pager entry point.

    In interactive mode (terminal is a tty), run until
    ``process_keystroke()`` detects the quit keystroke ('q').  In
    non-interactive mode, exit after displaying all unicode points.

    :param writer: callable writes to output stream, receiving unicode.
    :type writer: callable
    :param reader: callable reads keystrokes from input stream, sending
        instance of blessed.keyboard.Keystroke.
    :type reader: callable
    """
    self._page_data = self.initialize_page_data()
    self._set_lastpage()
    if self.term.is_a_tty:
        self._run_tty(writer, reader)
    else:
        self._run_notty(writer)
|
def unindex_objects(mapping_type, ids, es=None, index=None):
    """Remove documents of a specified mapping_type from the index.

    This allows for asynchronous deleting. If a mapping_type extends
    Indexable, you can add a ``pre_delete`` hook for the model that it's
    based on like this::

        @receiver(dbsignals.pre_delete, sender=MyModel)
        def remove_from_index(sender, instance, **kw):
            from elasticutils.contrib.django import tasks
            tasks.unindex_objects.delay(MyMappingType, [instance.id])

    :arg mapping_type: the mapping type for these ids
    :arg ids: the list of ids of things to remove
    :arg es: The `Elasticsearch` to use. If you don't specify an
        `Elasticsearch`, it'll use `mapping_type.get_es()`.
    :arg index: The name of the index to use. If you don't specify one
        it'll use `mapping_type.get_index()`.
    """
    # Honor the global kill-switch before touching Elasticsearch.
    if settings.ES_DISABLED:
        return
    for doc_id in ids:
        mapping_type.unindex(doc_id, es=es, index=index)
|
def get_slice_nodes(self, time_slice=0):
    """Return the nodes present in a particular timeslice.

    Parameters
    ----------
    time_slice : int
        The timeslice should be a positive value greater than or equal to zero

    Examples
    --------
    >>> from pgmpy.models import DynamicBayesianNetwork as DBN
    >>> dbn = DBN()
    >>> dbn.add_nodes_from(['D', 'G', 'I', 'S', 'L'])
    >>> dbn.add_edges_from([(('D', 0), ('G', 0)), (('I', 0), ('G', 0)), (('G', 0), ('L', 0)), (('D', 0), ('D', 1))])
    >>> dbn.get_slice_nodes()
    """
    is_valid = isinstance(time_slice, int) and time_slice >= 0
    if not is_valid:
        raise ValueError("The timeslice should be a positive value greater than or equal to zero")
    return [(node_name, time_slice) for node_name in self._nodes()]
|
def iter_cols(self, start=None, end=None):
    """Iterate each of the Region cols in this region.

    :param start: first column index to yield; defaults to 0
    :param end: one-past-last column index; defaults to ``self.ncols``
    :yields: each column as ``self.iloc[:, i]``
    """
    # Explicit None checks: the previous `start or 0` / `end or self.ncols`
    # treated an explicit 0 as "use the default", so iter_cols(end=0)
    # wrongly yielded every column instead of none.
    if start is None:
        start = 0
    if end is None:
        end = self.ncols
    for i in range(start, end):
        yield self.iloc[:, i]
|
def stencil_grid(S, grid, dtype=None, format=None):
    """Construct a sparse matrix form a local matrix stencil.

    Parameters
    ----------
    S : ndarray
        matrix stencil stored in N-d array
    grid : tuple
        tuple containing the N grid dimensions
    dtype :
        data type of the result
    format : string
        sparse matrix format to return, e.g. "csr", "coo", etc.

    Returns
    -------
    A : sparse matrix
        Sparse matrix which represents the operator given by applying
        stencil S at each vertex of a regular grid with given dimensions.

    Notes
    -----
    The grid vertices are enumerated as arange(prod(grid)).reshape(grid).
    This implies that the last grid dimension cycles fastest, while the
    first dimension cycles slowest.  For example, if grid=(2,3) then the
    grid vertices are ordered as (0,0), (0,1), (0,2), (1,0), (1,1), (1,2).
    This coincides with the ordering used by the NumPy functions
    ndenumerate() and mgrid().

    Examples
    --------
    >>> from pyamg.gallery import stencil_grid
    >>> stencil = [-1,2,-1]  # 1D Poisson stencil
    >>> grid = (5,)          # 1D grid with 5 vertices
    >>> A = stencil_grid(stencil, grid, dtype=float, format='csr')
    >>> A.todense()
    matrix([[ 2., -1.,  0.,  0.,  0.],
            [-1.,  2., -1.,  0.,  0.],
            [ 0., -1.,  2., -1.,  0.],
            [ 0.,  0., -1.,  2., -1.],
            [ 0.,  0.,  0., -1.,  2.]])

    >>> stencil = [[0,-1,0],[-1,4,-1],[0,-1,0]]  # 2D Poisson stencil
    >>> grid = (3,3)                             # 2D grid with shape 3x3
    >>> A = stencil_grid(stencil, grid, dtype=float, format='csr')
    >>> A.todense()
    matrix([[ 4., -1.,  0., -1.,  0.,  0.,  0.,  0.,  0.],
            [-1.,  4., -1.,  0., -1.,  0.,  0.,  0.,  0.],
            [ 0., -1.,  4.,  0.,  0., -1.,  0.,  0.,  0.],
            [-1.,  0.,  0.,  4., -1.,  0., -1.,  0.,  0.],
            [ 0., -1.,  0., -1.,  4., -1.,  0., -1.,  0.],
            [ 0.,  0., -1.,  0., -1.,  4.,  0.,  0., -1.],
            [ 0.,  0.,  0., -1.,  0.,  0.,  4., -1.,  0.],
            [ 0.,  0.,  0.,  0., -1.,  0., -1.,  4., -1.],
            [ 0.,  0.,  0.,  0.,  0., -1.,  0., -1.,  4.]])
    """
    S = np.asarray(S, dtype=dtype)
    grid = tuple(grid)

    # Odd dimensions are required so the stencil has a well-defined center.
    if not (np.asarray(S.shape) % 2 == 1).all():
        raise ValueError('all stencil dimensions must be odd')
    if len(grid) != np.ndim(S):
        raise ValueError('stencil dimension must equal number of grid\
dimensions')
    if min(grid) < 1:
        raise ValueError('grid dimensions must be positive')

    N_v = np.prod(grid)
    # number of vertices in the mesh
    N_s = (S != 0).sum()
    # number of nonzero stencil entries

    # diagonal offsets
    diags = np.zeros(N_s, dtype=int)

    # compute index offset of each dof within the stencil
    strides = np.cumprod([1] + list(reversed(grid)))[:-1]
    indices = tuple(i.copy() for i in S.nonzero())
    for i, s in zip(indices, S.shape):
        # shift each axis index so it is measured relative to the stencil
        # center (in-place: `indices` still aliases these arrays)
        i -= s // 2
    for stride, coords in zip(strides, reversed(indices)):
        # linearize the N-d offsets into flat-matrix diagonal offsets;
        # `reversed` because the last grid dimension cycles fastest
        diags += stride * coords

    # one row of `data` per nonzero stencil entry, replicated over all vertices
    data = S[S != 0].repeat(N_v).reshape(N_s, N_v)
    indices = np.vstack(indices).T

    # zero boundary connections: a stencil entry that would reach outside the
    # grid along axis n is masked out near that boundary
    for index, diag in zip(indices, data):
        diag = diag.reshape(grid)
        for n, i in enumerate(index):
            if i > 0:
                s = [slice(None)] * len(grid)
                s[n] = slice(0, i)
                s = tuple(s)
                diag[s] = 0
            elif i < 0:
                s = [slice(None)] * len(grid)
                s[n] = slice(i, None)
                s = tuple(s)
                diag[s] = 0

    # remove diagonals that lie outside matrix
    mask = abs(diags) < N_v
    if not mask.all():
        diags = diags[mask]
        data = data[mask]

    # sum duplicate diagonals (distinct stencil entries can land on the same
    # matrix diagonal when grid dimensions are small)
    if len(np.unique(diags)) != len(diags):
        new_diags = np.unique(diags)
        new_data = np.zeros((len(new_diags), data.shape[1]), dtype=data.dtype)
        for dia, dat in zip(diags, data):
            n = np.searchsorted(new_diags, dia)
            new_data[n, :] += dat
        diags = new_diags
        data = new_data

    return sparse.dia_matrix((data, diags), shape=(N_v, N_v)).asformat(format)
|
def magic(adata, name_list=None, k=10, a=15, t='auto', n_pca=100, knn_dist='euclidean', random_state=None, n_jobs=None, verbose=False, copy=None, **kwargs):
    """Markov Affinity-based Graph Imputation of Cells (MAGIC) API [vanDijk18]_.

    MAGIC is an algorithm for denoising and transcript recovery of single
    cells applied to single-cell sequencing data. MAGIC builds a graph from
    the data and uses diffusion to smooth out noise and recover the data
    manifold.

    More information and bug reports
    `here <https://github.com/KrishnaswamyLab/MAGIC>`__. For help, visit
    <https://krishnaswamylab.org/get-help>.

    Parameters
    ----------
    adata : :class:`~scanpy.api.AnnData`
        An anndata file with `.raw` attribute representing raw counts.
    name_list : `list`, `'all_genes'`, or `'pca_only'`, optional (default: `'all_genes'`)
        Denoised genes to return. Default is all genes, but this
        may require a large amount of memory if the input data is sparse.
    k : int, optional, default: 10
        number of nearest neighbors on which to build kernel
    a : int, optional, default: 15
        sets decay rate of kernel tails.
        If None, alpha decaying kernel is not used
    t : int, optional, default: 'auto'
        power to which the diffusion operator is powered.
        This sets the level of diffusion. If 'auto', t is selected
        according to the Procrustes disparity of the diffused data
    n_pca : int, optional, default: 100
        Number of principal components to use for calculating
        neighborhoods. For extremely large datasets, using
        n_pca < 20 allows neighborhoods to be calculated in
        roughly log(n_samples) time.
    knn_dist : string, optional, default: 'euclidean'
        recommended values: 'euclidean', 'cosine', 'precomputed'
        Any metric from `scipy.spatial.distance` can be used
        distance metric for building kNN graph. If 'precomputed',
        `data` should be an n_samples x n_samples distance or
        affinity matrix
    random_state : `int`, `numpy.RandomState` or `None`, optional (default: `None`)
        Random seed. Defaults to the global `numpy` random number generator
    n_jobs : `int` or None, optional. Default: None
        Number of threads to use in training. All cores are used by default.
    verbose : `bool`, `int` or `None`, optional (default: `sc.settings.verbosity`)
        If `True` or an integer `>= 2`, print status messages.
        If `None`, `sc.settings.verbosity` is used.
    copy : `bool` or `None`, optional. Default: `None`.
        If true, a copy of anndata is returned. If `None`, `copy` is True if
        `genes` is not `'all_genes'` or `'pca_only'`. `copy` may only be False
        if `genes` is `'all_genes'` or `'pca_only'`, as the resultant data
        will otherwise have different column names from the input data.
    kwargs : additional arguments to `magic.MAGIC`

    Returns
    -------
    If `copy` is True, AnnData object is returned.

    If `subset_genes` is not `all_genes`, PCA on MAGIC values of cells are
    stored in `adata.obsm['X_magic']` and `adata.X` is not modified.

    The raw counts are stored in `.raw` attribute of AnnData object.

    Examples
    --------
    >>> import scanpy.api as sc
    >>> import magic
    >>> adata = sc.datasets.paul15()
    >>> sc.pp.normalize_per_cell(adata)
    >>> sc.pp.sqrt(adata)  # or sc.pp.log1p(adata)
    >>> adata_magic = sc.pp.magic(adata, name_list=['Mpo', 'Klf1', 'Ifitm1'], k=5)
    >>> adata_magic.shape
    (2730, 3)
    >>> sc.pp.magic(adata, name_list='pca_only', k=5)
    >>> adata.obsm['X_magic'].shape
    (2730, 100)
    >>> sc.pp.magic(adata, name_list='all_genes', k=5)
    >>> adata.X.shape
    (2730, 3451)
    """
    try:
        from magic import MAGIC
    except ImportError:
        raise ImportError(
            'Please install magic package via `pip install --user '
            'git+git://github.com/KrishnaswamyLab/MAGIC.git#subdirectory=python`')
    # Fixed copy-paste bug: this log line previously said 'computing PHATE'.
    logg.info('computing MAGIC', r=True)
    needs_copy = not (name_list is None or
                      (isinstance(name_list, str) and
                       name_list in ["all_genes", "pca_only"]))
    if copy is None:
        copy = needs_copy
    elif needs_copy and not copy:
        raise ValueError(
            "Can only perform MAGIC in-place with `name_list=='all_genes' or "
            "`name_list=='pca_only'` (got {}). Consider setting "
            "`copy=True`".format(name_list))
    adata = adata.copy() if copy else adata
    verbose = settings.verbosity if verbose is None else verbose
    if isinstance(verbose, (str, int)):
        # NOTE(review): this replaces the caller-provided verbosity with the
        # global-settings check; looks questionable, but preserved as-is.
        verbose = _settings_verbosity_greater_or_equal_than(2)
    n_jobs = settings.n_jobs if n_jobs is None else n_jobs
    X_magic = MAGIC(k=k, a=a, t=t, n_pca=n_pca, knn_dist=knn_dist,
                    random_state=random_state, n_jobs=n_jobs,
                    verbose=verbose, **kwargs).fit_transform(adata, genes=name_list)
    logg.info(' finished', time=True,
              end=' ' if _settings_verbosity_greater_or_equal_than(3) else '\n')
    # update AnnData instance
    if name_list == "pca_only":
        # special case - update adata.obsm with smoothed values
        adata.obsm["X_magic"] = X_magic.X
        logg.hint('added\n' ' \'X_magic\', PCA on MAGIC coordinates (adata.obsm)')
    elif copy:
        # just return X_magic
        X_magic.raw = adata
        adata = X_magic
    else:
        # replace data with smoothed data
        adata.raw = adata
        adata.X = X_magic.X
    if copy:
        return adata
|
async def create_http_connection(loop, protocol_factory, host, port=25105, auth=None):
    """Create an HTTP session used to connect to the Insteon Hub.

    Returns a ``(transport, protocol)`` pair, mirroring asyncio's
    ``create_connection`` convention.
    """
    proto = protocol_factory()
    transport = HttpTransport(loop, proto, host, port, auth)
    _LOGGER.debug("create_http_connection Finished creating connection")
    return transport, proto
|
def _read2(self, length=None, use_compression=None, project=None, **kwargs):
    '''
    :param length: Maximum number of bytes to be read
    :type length: integer
    :param project: project to use as context for this download (may affect
        which billing account is billed for this download). If specified,
        must be a project in which this file exists. If not specified, the
        project ID specified in the handler is used for the download, IF it
        contains this file. If set to DXFile.NO_PROJECT_HINT, no project ID
        is supplied for the download, even if the handler specifies a
        project ID.
    :type project: str or None
    :rtype: string
    :raises: :exc:`~dxpy.exceptions.ResourceNotFound` if *project* is supplied
        and it does not contain this file

    Returns the next *length* bytes, or all the bytes until the end of file
    (if no *length* is given or there are fewer than *length* bytes left in
    the file).

    .. note:: After the first call to read(), the project arg and
        passthrough kwargs are not respected while using the same response
        iterator (i.e. until next seek).
    '''
    # Lazily discover the file size on first read; the file must be closed
    # on the platform before its bytes can be fetched.
    if self._file_length == None:
        desc = self.describe(**kwargs)
        if desc["state"] != "closed":
            raise DXFileError("Cannot read from file until it is in the closed state")
        self._file_length = int(desc["size"])

    # If running on a worker, wait for the first file download chunk
    # to come back before issuing any more requests. This ensures
    # that all subsequent requests can take advantage of caching,
    # rather than having all of the first DXFILE_HTTP_THREADS
    # requests simultaneously hit a cold cache. Enforce a minimum
    # size for this heuristic so we don't incur the overhead for
    # tiny files (which wouldn't contribute as much to the load
    # anyway).
    get_first_chunk_sequentially = (self._file_length > 128 * 1024 and self._pos == 0 and dxpy.JOB_ID)

    # At EOF: return an empty bytes object.
    if self._pos == self._file_length:
        return b""

    # Clamp the request to the bytes actually remaining.
    if length == None or length > self._file_length - self._pos:
        length = self._file_length - self._pos

    # Fast path: the whole request is already buffered.
    buf = self._read_buf
    buf_remaining_bytes = dxpy.utils.string_buffer_length(buf) - buf.tell()
    if length <= buf_remaining_bytes:
        self._pos += length
        return buf.read(length)
    else:
        # Slow path: drain the buffer, then append response chunks until
        # we have `length` bytes (any chunk overrun is stashed in a fresh
        # read buffer for the next call).
        orig_buf_pos = buf.tell()
        orig_file_pos = self._pos
        buf.seek(0, os.SEEK_END)
        self._pos += buf_remaining_bytes
        while self._pos < orig_file_pos + length:
            remaining_len = orig_file_pos + length - self._pos
            # (Re)create the request iterator only when no response iterator
            # is active; after the first read, project/kwargs are no longer
            # respected until the next seek (see note above).
            if self._response_iterator is None:
                self._request_iterator = self._generate_read_requests(start_pos=self._pos, project=project, **kwargs)
            content = self._next_response_content(get_first_chunk_sequentially=get_first_chunk_sequentially)
            if len(content) < remaining_len:
                buf.write(content)
                self._pos += len(content)
            else:
                # response goes beyond requested length
                buf.write(content[:remaining_len])
                self._pos += remaining_len
                self._read_buf = BytesIO()
                self._read_buf.write(content[remaining_len:])
                self._read_buf.seek(0)
        buf.seek(orig_buf_pos)
        return buf.read()
|
def get_qualifier_id(self):
    """Gets the ``Qualifier Id`` for this authorization.

    return: (osid.id.Id) - the qualifier ``Id``
    *compliance: mandatory -- This method must be implemented.*
    """
    # Mirrors the template for osid.learning.Activity.get_objective_id
    raw_id = self._my_map['qualifierId']
    if not raw_id:
        raise errors.IllegalState('qualifier empty')
    return Id(raw_id)
|
def report(data):
    """Create a Rmd report for small RNAseq analysis.

    Writes ``summary.csv`` (sample_id plus guessed metadata columns) into
    ``<work_dir>/report`` and patches the report template.

    :param data: list of per-sample structures (bcbio pipeline layout)
    :return: path to the generated summary CSV
    """
    work_dir = dd.get_work_dir(data[0][0])
    out_dir = op.join(work_dir, "report")
    safe_makedir(out_dir)
    summary_file = op.join(out_dir, "summary.csv")
    with file_transaction(summary_file) as out_tx:
        with open(out_tx, 'w') as out_handle:
            out_handle.write("sample_id,%s\n" % _guess_header(data[0][0]))
            for sample in data:
                info = sample[0]
                group = _guess_group(info)
                # NOTE(review): the original also looked up info["seqbuster"]
                # here but never used the value; the dead lookup was removed.
                out_handle.write(",".join([dd.get_sample_name(info), group]) + "\n")
    _modify_report(work_dir, out_dir)
    return summary_file
|
def _create_connection(self):
    """Create a new websocket connection with proper headers."""
    logging.debug("Initializing new websocket connection.")
    headers = {
        'Authorization': self.service._get_bearer_token(),
        'Predix-Zone-Id': self.ingest_zone_id,
        'Content-Type': 'application/json',
    }
    url = self.ingest_uri
    logging.debug("URL=" + str(url))
    logging.debug("HEADERS=" + str(headers))
    # Should consider connection pooling and longer timeouts
    return websocket.create_connection(url, header=headers)
|
def get_tagged_version_numbers(series='stable'):
    """Retrieve git tags and find version numbers for a release series.

    series - 'stable', 'oldstable', or 'testing'

    :return: list of matching version strings, newest first
    :raises AssertionError: for an unknown series name
    """
    releases = []
    # Raw strings: '\.' is a regex escape, not a valid Python string escape;
    # non-raw literals here warn (and eventually error) on modern Pythons.
    if series == 'testing':
        # Testing releases always have a hyphen after the version number:
        tag_regex = re.compile(r'^refs/tags/cassandra-([0-9]+\.[0-9]+\.[0-9]+-.*$)')
    else:
        # Stable and oldstable releases are just a number:
        tag_regex = re.compile(r'^refs/tags/cassandra-([0-9]+\.[0-9]+\.[0-9]+$)')
    tag_url = urllib.request.urlopen(GITHUB_TAGS)
    for ref in (i.get('ref', '') for i in json.loads(tag_url.read())):
        m = tag_regex.match(ref)
        if m:
            releases.append(LooseVersion(m.groups()[0]))
    # Sort by semver:
    releases.sort(reverse=True)
    stable_major_version = LooseVersion(str(releases[0].version[0]) + "." + str(releases[0].version[1]))
    stable_releases = [r for r in releases if r >= stable_major_version]
    oldstable_releases = [r for r in releases if r not in stable_releases]
    oldstable_major_version = LooseVersion(str(oldstable_releases[0].version[0]) + "." + str(oldstable_releases[0].version[1]))
    oldstable_releases = [r for r in oldstable_releases if r >= oldstable_major_version]
    if series == 'testing':
        return [r.vstring for r in releases]
    elif series == 'stable':
        return [r.vstring for r in stable_releases]
    elif series == 'oldstable':
        return [r.vstring for r in oldstable_releases]
    else:
        raise AssertionError("unknown release series: {series}".format(series=series))
|
def detach(self, dwProcessId, bIgnoreExceptions=False):
    """
    Detaches from a process currently being debugged.

    @note: On Windows 2000 and below the process is killed.

    @see: L{attach}, L{detach_from_all}

    @type  dwProcessId: int
    @param dwProcessId: Global ID of a process to detach from.

    @type  bIgnoreExceptions: bool
    @param bIgnoreExceptions: C{True} to ignore any exceptions that may be
        raised when detaching. C{False} to stop and raise an exception when
        encountering an error.

    @raise WindowsError: Raises an exception on error, unless
        C{bIgnoreExceptions} is C{True}.
    """
    # Keep a reference to the process. We'll need it later.
    try:
        aProcess = self.system.get_process(dwProcessId)
    except KeyError:
        # Not tracked by the system snapshot; wrap the raw PID instead.
        aProcess = Process(dwProcessId)

    # Determine if there is support for detaching.
    # This check should only fail on Windows 2000 and older.
    try:
        win32.DebugActiveProcessStop
        can_detach = True
    except AttributeError:
        can_detach = False

    # Continue the last event before detaching.
    # XXX not sure about this...
    try:
        if can_detach and self.lastEvent and self.lastEvent.get_pid() == dwProcessId:
            self.cont(self.lastEvent)
    except Exception:
        if not bIgnoreExceptions:
            raise
        # sys.exc_info()[1] keeps this compatible with both Py2 and Py3
        # exception-scoping rules.
        e = sys.exc_info()[1]
        warnings.warn(str(e), RuntimeWarning)

    # Cleanup all data referring to the process.
    self.__cleanup_process(dwProcessId, bIgnoreExceptions=bIgnoreExceptions)

    try:
        # Detach from the process.
        # On Windows 2000 and before, kill the process.
        if can_detach:
            try:
                win32.DebugActiveProcessStop(dwProcessId)
            except Exception:
                if not bIgnoreExceptions:
                    raise
                e = sys.exc_info()[1]
                warnings.warn(str(e), RuntimeWarning)
        else:
            try:
                aProcess.kill()
            except Exception:
                if not bIgnoreExceptions:
                    raise
                e = sys.exc_info()[1]
                warnings.warn(str(e), RuntimeWarning)
    finally:
        # Cleanup what remains of the process data.
        aProcess.clear()
|
def check_plugin(self, plugin):
    """Check if the section is in the proper format vcf format.

    Args:
        vcf_section (dict): The information from a vcf section

    Returns:
        True is it is in the proper format

    Raises:
        ValidateError: if a required key is missing or holds a bad value.
    """
    vcf_section = self[plugin]
    # 'field' is mandatory and must name a known VCF column.
    try:
        vcf_field = vcf_section['field']
        if not vcf_field in self.vcf_columns:
            raise ValidateError("field has to be in {0}\n" "Wrong field name in plugin: {1}".format(self.vcf_columns, plugin))
        if vcf_field == 'INFO':
            # INFO fields additionally need 'info_key'; CSQ entries need
            # 'csq_key' on top of that.  The ValidateErrors raised here are
            # not KeyErrors, so they propagate past the enclosing handlers.
            try:
                info_key = vcf_section['info_key']
                if info_key == 'CSQ':
                    try:
                        csq_key = vcf_section['csq_key']
                    except KeyError:
                        raise ValidateError("CSQ entrys has to refer to an csq field.\n" "Refer with keyword 'csq_key'\n" "csq_key is missing in section: {0}".format(plugin))
            except KeyError:
                raise ValidateError("INFO entrys has to refer to an INFO field.\n" "Refer with keyword 'info_key'\n" "info_key is missing in section: {0}".format(plugin))
    except KeyError:
        raise ValidateError("Vcf entrys have to refer to a field in the VCF with keyword" " 'field'.\nMissing keyword 'field' in plugin: {0}".format(plugin))
    # 'data_type' is mandatory and must be one of the known types.
    try:
        data_type = vcf_section['data_type']
        if not data_type in self.data_types:
            raise ValidateError("data_type has to be in {0}\n" "Wrong data_type in plugin: {1}".format(self.data_types, plugin))
    except KeyError:
        raise ValidateError("Vcf entrys have to refer to a data type in the VCF with " "keyword 'data_type'.\n" "Missing data_type in plugin: {0}".format(plugin))
    # Separators are required for every data_type except 'flag'.  A
    # single-item value is normalized to a list in-place on the config.
    separators = vcf_section.get('separators', None)
    if separators:
        if len(separators) == 1:
            self[plugin]['separators'] = list(separators)
    else:
        if data_type != 'flag':
            raise ValidateError("If data_type != flag the separators have to be defined" "Missing separators in plugin: {0}".format(plugin))
    # 'record_rule' is optional; absent means 'max'.
    record_rule = vcf_section.get('record_rule', None)
    if record_rule:
        if not record_rule in ['min', 'max']:
            raise ValidateError("Record rules have to be in {0}\n" "Wrong record_rule in plugin: {1}".format(['min', 'max'], plugin))
    else:
        self.logger.info("Setting record rule to default: 'max'")
    return True
|
def _fetchall(self, query, vars, limit=None, offset=0):
    """Return multiple rows.

    :param query: SQL text; LIMIT/OFFSET clauses are appended here
    :param vars: parameters passed through to ``cursor.execute``
    :param limit: maximum number of rows (defaults to the app's
        DEFAULT_PAGE_SIZE config value)
    :param offset: number of rows to skip
    :raises ValueError/TypeError: if limit or offset is not integer-like
    """
    if limit is None:
        limit = current_app.config['DEFAULT_PAGE_SIZE']
    # LIMIT/OFFSET are interpolated directly into the SQL string, so force
    # them to integers to rule out SQL injection through these values.
    query += ' LIMIT %d OFFSET %d' % (int(limit), int(offset))
    cursor = self.get_db().cursor()
    self._log(cursor, query, vars)
    cursor.execute(query, vars)
    return cursor.fetchall()
|
def show_events(self, status=None, nids=None):
    """Print the Abinit events (ERRORS, WARNIING, COMMENTS) to stdout.

    Args:
        status: if not None, only the tasks with this status are select
        nids: optional list of node identifiers used to filter the tasks.
    """
    _, ncols = get_terminal_size()
    for task in self.iflat_tasks(status=status, nids=nids):
        report = task.get_event_report()
        if not report:
            continue
        print(make_banner(str(task), width=ncols, mark="="))
        print(report)
|
def _get_installed(self):
    """Gets a list of the file paths to repo settings files that are
    being monitored by the CI server.
    """
    from utility import get_json
    # This is a little tricky because the data file doesn't just have a list
    # of installed servers. It also manages the script's database that tracks
    # the user's interactions with it.
    fulldata = get_json(self.instpath, {})
    return fulldata.get("installed", [])
|
def digicam_configure_send(self, target_system, target_component, mode, shutter_speed, aperture, iso, exposure_type, command_id, engine_cut_off, extra_param, extra_value, force_mavlink1=False):
    '''Configure on-board Camera Control System.

    target_system    : System ID (uint8_t)
    target_component : Component ID (uint8_t)
    mode             : Mode enumeration from 1 to N //P, TV, AV, M, Etc (0 means ignore) (uint8_t)
    shutter_speed    : Divisor number //e.g. 1000 means 1/1000 (0 means ignore) (uint16_t)
    aperture         : F stop number x 10 //e.g. 28 means 2.8 (0 means ignore) (uint8_t)
    iso              : ISO enumeration from 1 to N //e.g. 80, 100, 200, Etc (0 means ignore) (uint8_t)
    exposure_type    : Exposure type enumeration from 1 to N (0 means ignore) (uint8_t)
    command_id       : Command Identity (incremental loop: 0 to 255) //A command sent multiple times will be executed or pooled just once (uint8_t)
    engine_cut_off   : Main engine cut-off time before camera trigger in seconds/10 (0 means no cut-off) (uint8_t)
    extra_param      : Extra parameters enumeration (0 means ignore) (uint8_t)
    extra_value      : Correspondent value to given extra_param (float)
    '''
    # Encode the message first, then hand it to the generic sender.
    msg = self.digicam_configure_encode(
        target_system, target_component, mode, shutter_speed, aperture,
        iso, exposure_type, command_id, engine_cut_off, extra_param,
        extra_value)
    return self.send(msg, force_mavlink1=force_mavlink1)
|
def _find_export ( self , func ) : # type : ( Callable [ [ Tuple [ Any , EndpointDescription ] ] , bool ] ) - > Optional [ Tuple [ Any , EndpointDescription ] ]
"""Look for an export using the given lookup method
The lookup method must accept a single parameter , which is a tuple
containing a service instance and endpoint description .
: param func : A function to look for the excepted export
: return : The found tuple or None"""
|
with self . _exported_instances_lock :
for val in self . _exported_services . values ( ) :
if func ( val ) :
return val
return None
|
def highlight(__text: str, *, lexer: str = 'diff', formatter: str = 'terminal') -> str:
    """Return text highlighted using ``pygments``.

    Returns the text untouched if colour output is not enabled (stdout is
    not a terminal).

    See also: :pypi:`Pygments`

    Args:
        __text: Text to highlight
        lexer: Pygments lexer name to use
        formatter: Pygments formatter name to use

    Returns:
        Syntax highlighted output, when possible
    """
    if not sys.stdout.isatty():
        return __text
    return pyg_highlight(__text,
                         get_lexer_by_name(lexer),
                         get_formatter_by_name(formatter))
|
def start(queue, profile=None, tag='salt/engine/sqs', owner_acct_id=None):
    '''Listen to sqs and fire message on event bus'''
    # On the master we fire straight onto the master event bus; on a minion
    # we go through the event.send execution module.
    if __opts__.get('__role') == 'master':
        fire_master = salt.utils.event.get_master_event(
            __opts__, __opts__['sock_dir'], listen=False).fire_event
    else:
        fire_master = __salt__['event.send']
    message_format = __opts__.get('sqs.message_format', None)
    sqs = _get_sqs_conn(profile)
    q = None
    # Poll forever; the queue handle is created lazily on first iteration.
    while True:
        if not q:
            q = sqs.get_queue(queue, owner_acct_id=owner_acct_id)
            q.set_message_class(boto.sqs.message.RawMessage)
        _process_queue(q, queue, fire_master, tag=tag,
                       owner_acct_id=owner_acct_id,
                       message_format=message_format)
|
def access_elementusers(self, elementuser_id, access_id=None, tenant_id=None, api_version="v2.0"):
    """Get all accesses for a particular user.

    **Parameters:**:
      - **elementuser_id**: Element User ID
      - **access_id**: (optional) Access ID
      - **tenant_id**: Tenant ID
      - **api_version**: API version to use (default v2.0)

    **Returns:** requests.Response object extended with cgx_status and cgx_content properties.
    """
    if tenant_id is None and self._parent_class.tenant_id:
        # Pull tenant_id from parent namespace cache.
        tenant_id = self._parent_class.tenant_id
    elif not tenant_id:
        # No value for tenant_id.
        raise TypeError("tenant_id is required but not set or cached.")
    cur_ctlr = str(self._parent_class.controller)
    if access_id:
        url = cur_ctlr + "/{}/api/tenants/{}/elementusers/{}/access/{}".format(api_version, tenant_id, elementuser_id, access_id)
    else:
        url = cur_ctlr + "/{}/api/tenants/{}/elementusers/{}/access".format(api_version, tenant_id, elementuser_id)
    api_logger.debug("URL = %s", url)
    return self._parent_class.rest_call(url, "get")
|
def go_to_line(self, line=None):
    """Go to line dialog."""
    if line is not None:
        # Called from the fileswitcher with an explicit line number,
        # so there is no need for the dialog.
        self.get_current_editor().go_to_line(line)
    elif self.data:
        self.get_current_editor().exec_gotolinedialog()
|
def _newIdentifier ( self ) :
"""Make a new identifier for an as - yet uncreated model object .
@ rtype : C { int }"""
|
id = self . _allocateID ( )
self . _idsToObjects [ id ] = self . _NO_OBJECT_MARKER
self . _lastValues [ id ] = None
return id
|
def requires_list(self):
    """Return this variant's requirements as a RequirementList.

    Evaluated lazily on purpose: reading the 'requires' attribute may
    trigger a package load, which is avoided entirely if this variant is
    reduced away before that happens.

    Raises ResolveError when the variant's own requirements conflict.
    """
    reqs = RequirementList(self.variant.get_requires(build_requires=self.building))
    if reqs.conflict:
        raise ResolveError(
            "The package %s has an internal requirements conflict: %s"
            % (str(self), str(reqs)))
    return reqs
|
async def create(source_id: str, attrs: dict, cred_def_handle: int, name: str, price: str):
    """Create an object representing an Issuer Credential.

    :param source_id: Tag associated by user of sdk
    :param attrs: Attributes that will form the credential
    :param cred_def_handle: Handle from a previously created credential def object
    :param name: Name given to the Credential
    :param price: Price, in tokens, required as payment for issuance of the credential.

    Example:
        source_id = '1'
        cred_def_handle = 1
        attrs = {'key': 'value', 'key2': 'value2', 'key3': 'value3'}
        name = 'Credential Name'
        price = 1
        issuer_credential = await IssuerCredential.create(source_id, attrs, cred_def_handle, name, price)
    """
    # No explicit issuer DID is marshalled (NULL pointer): the default
    # institution_did from the config is used as issuer_did.
    c_issuer_did = None
    c_params = (
        c_char_p(source_id.encode('utf-8')),
        c_uint32(cred_def_handle),
        c_issuer_did,
        c_char_p(json.dumps(attrs).encode('utf-8')),
        c_char_p(name.encode('utf-8')),
        c_char_p(price.encode('utf-8')),
    )
    constructor_params = (source_id, attrs, cred_def_handle, name, price)
    return await IssuerCredential._create("vcx_issuer_create_credential", constructor_params, c_params)
|
def _send_string_selection(self, string: str):
    """Send a string by placing it on the X PRIMARY (mouse) selection and
    simulating a middle-button paste, then restoring the old content."""
    # Back up the current selection so it can be restored afterwards.
    previous = self.clipboard.selection
    if previous is None:
        logger.warning("Tried to backup the X PRIMARY selection content, but got None instead of a string.")
    self.clipboard.selection = string
    self.__enqueue(self._paste_using_mouse_button_2)
    self.__enqueue(self._restore_clipboard_selection, previous)
|
def create_q(token):
    """Build the Q() filter object for a single parsed search token."""
    meta = getattr(token, 'meta', None)
    query = getattr(token, 'query', '')

    wildcards = None
    if isinstance(query, six.string_types):
        # Unicode -> quoted string, no wildcard handling.
        search = query
    else:
        # List -> unquoted string, possibly surrounded by '*' wildcards.
        if len(query) == 1:
            search = query[0]
        elif len(query) == 3:
            wildcards, search = 'BOTH', query[1]
        elif len(query) == 2:
            if query[0] == '*':
                wildcards, search = 'START', query[1]
            else:
                wildcards, search = 'END', query[0]

    # Ignore short terms and stop words.
    if (len(search) < 3 and not search.isdigit()) or search in STOP_WORDS:
        return Q()

    # Map the wildcard position onto the matching field lookup suffix.
    lookup = {'BOTH': 'icontains',
              'START': 'iendswith',
              'END': 'istartswith'}.get(wildcards, 'iexact')

    if not meta:
        # No meta prefix: substring-match across all search fields.
        q = Q()
        for field in SEARCH_FIELDS:
            q |= Q(**{'%s__icontains' % field: search})
        return q
    if meta == 'category':
        return (Q(**{'categories__title__%s' % lookup: search}) |
                Q(**{'categories__slug__%s' % lookup: search}))
    elif meta == 'author':
        return Q(**{'authors__%s__%s' % (Author.USERNAME_FIELD, lookup): search})
    elif meta == 'tag':
        # TODO: tags ignore wildcards
        return Q(tags__icontains=search)
|
def _filter ( self , query , ** kwargs ) :
"""Filter a query with user - supplied arguments ."""
|
query = self . _auto_filter ( query , ** kwargs )
return query
|
def drawPoint(self, x, y, silent=True):
    """Draw a single dab of the current :py:class:`Brush` on the active
    :py:class:`Layer`.

    Coordinates are relative to the ORIGINAL layer size; downsampling is
    applied here before the dab is placed.

    :param x: Point X coordinate.
    :param y: Point Y coordinate.
    :rtype: Nothing.
    """
    start = time.time()
    # Bring the coordinates into downsampled canvas space.
    x = int(x / config.DOWNSAMPLING)
    y = int(y / config.DOWNSAMPLING)
    brush = self.brush
    if brush.usesSourceCaching:
        # Fast path: stamp the pre-colored cached brush source via the JIT dab.
        corner_x = int(x - brush.brushSize * 0.5)
        corner_y = int(y - brush.brushSize * 0.5)
        applyMirroredDab_jit(self.mirrorMode, self.image.getActiveLayer().data,
                             corner_x, corner_y, brush.coloredBrushSource.copy(),
                             config.CANVAS_SIZE, brush.brushMask)
    else:
        # Slow path: generate the dab on the fly for this position.
        brush.makeDab(self.image.getActiveLayer(), int(x), int(y),
                      self.color, self.secondColor, mirror=self.mirrorMode)
    config.AVGTIME.append(time.time() - start)
|
def _draw_fold_indicator(self, top, mouse_over, collapsed, painter):
    """Draw the fold indicator/trigger (arrow).

    :param top: Top position
    :param mouse_over: Whether the mouse is over the indicator
    :param collapsed: Whether the trigger is collapsed or not.
    :param painter: QPainter
    """
    rect = QRect(0, top, self.sizeHint().width(), self.sizeHint().height())
    if self._native_icons:
        # Let the widget style render a native branch indicator.
        opt = QStyleOptionViewItem()
        opt.rect = rect
        opt.state = QStyle.State_Active | QStyle.State_Item | QStyle.State_Children
        if not collapsed:
            opt.state |= QStyle.State_Open
        if mouse_over:
            opt.state |= (QStyle.State_MouseOver |
                          QStyle.State_Enabled |
                          QStyle.State_Selected)
            opt.palette.setBrush(QPalette.Window, self.palette().highlight())
        opt.rect.translate(-2, 0)
        self.style().drawPrimitive(QStyle.PE_IndicatorBranch, opt, painter, self)
    else:
        # Themed icons are ordered: collapsed, collapsed+hover, open, open+hover.
        index = 0 if collapsed else 2
        if mouse_over:
            index += 1
        ima.icon(self._indicators_icons[index]).paint(painter, rect)
|
def as_dot(self) -> str:
    """Return the dot-format representation of the graph as a string."""
    pydot_graph = nx.drawing.nx_pydot.to_pydot(self._graph)
    return pydot_graph.to_string()
|
def _cont_norm_running_quantile_mp(wl, fluxes, ivars, q, delta_lambda, n_proc=2, verbose=False):
    """The same as _cont_norm_running_quantile() above,
    but using multi-processing (one worker task per star).
    Bo Zhang (NAOC)

    :param wl: wavelength grid shared by every star
    :param fluxes: 2-D flux array, one row per star
    :param ivars: 2-D inverse-variance array matching *fluxes*
    :param q: quantile used for the continuum estimate
    :param delta_lambda: width parameter of the running-quantile window
    :param n_proc: number of worker processes in the pool
    :param verbose: when True, print progress for each submitted star
    :return: (norm_fluxes, norm_ivars) - continuum-normalized fluxes and
        the inverse variances rescaled by continuum**2
    """
    nStar = fluxes.shape[0]
    # start mp.Pool and submit one asynchronous task per star
    mp_results = []
    pool = mp.Pool(processes=n_proc)
    for i in xrange(nStar):
        # Each row is reshaped to (1, n_pix): the worker expects 2-D input.
        mp_results.append(pool.apply_async(
            _find_cont_running_quantile,
            (wl, fluxes[i, :].reshape((1, -1)), ivars[i, :].reshape((1, -1)),
             q, delta_lambda),
            {'verbose': False}))
        if verbose:
            print('@Bo Zhang: continuum normalizing star [%d/%d] ...' % (i + 1, nStar))
    # close mp.Pool: no more submissions, then block until workers finish
    pool.close()
    pool.join()
    # reshape results --> cont (collect per-star continua into one array)
    cont = np.zeros_like(fluxes)
    for i in xrange(nStar):
        cont[i, :] = mp_results[i].get()
    # .flatten()
    # Normalize; pixels where the continuum is exactly 0 keep flux 1.0.
    norm_fluxes = np.ones(fluxes.shape)
    norm_fluxes[cont != 0] = fluxes[cont != 0] / cont[cont != 0]
    norm_ivars = cont ** 2 * ivars
    print('@Bo Zhang: continuum normalization finished!')
    return norm_fluxes, norm_ivars
|
def Logout(self, dumpXml=None):
    """Disconnect from UCS and clear all cached session state.

    Returns True on success (or when there is no active session); raises
    UcsException when the server reports an error on logout.
    """
    from UcsBase import UcsException

    if self._cookie is None:
        # No active session - nothing to do.
        return True
    if self._refreshTimer:
        self._refreshTimer.cancel()

    response = self.AaaLogout(dumpXml)

    # Drop the cached session attributes regardless of the server reply.
    self._cookie = None
    self._lastUpdateTime = str(time.asctime())
    self._domains = None
    self._priv = None
    self._sessionId = None
    self._version = None
    if self._ucs in defaultUcs:
        del defaultUcs[self._ucs]

    if response.errorCode != 0:
        raise UcsException(response.errorCode, response.errorDescr)
    return True
|
def SortBy(*qs):
    """Convert Q objects into a list of (path, direction) sort instructions.

    A path ending in '.desc' sorts descending on the stripped path;
    everything else sorts ascending.
    """
    instructions = []
    for q in qs:
        path = q._path
        if path.endswith('.desc'):
            instructions.append((path[:-5], DESCENDING))
        else:
            instructions.append((path, ASCENDING))
    return instructions
|
def plugin_for(cls, model):
    '''Find and return a plugin for this model. Uses inheritance: walks the
    model's base classes until one with a registered plugin is found.'''
    logger.debug("Getting a plugin for: %s", model)
    if not issubclass(model, Model):
        return
    if model in cls.plugins:
        return cls.plugins[model]
    # Not registered directly - recurse into the base classes.
    for base in model.__bases__:
        plugin = cls.plugin_for(base)
        if plugin:
            return plugin
|
def compress_ranges_to_lists(self):
    '''Convert the internal dimension ranges into plain (nested) lists of
    the restricted size: every dimension rule is applied recursively and
    the result is returned as a list (of lists).'''
    return [
        elem.compress_ranges_to_lists() if isinstance(elem, FixedListSubset) else elem
        for elem in self
    ]
|
def draw_light_2d_linear(self, kwargs_list, n=1, new_compute=False, r_eff=1.):
    """Draw random projected radii R from the light profile via its CDF.

    The cumulative light distribution is accumulated on a linear radius
    grid, normalized, inverted with an interpolation, and cached on the
    instance as ``_light_cdf``; uniform random draws are then mapped
    through the inverse CDF.

    :param kwargs_list: light-profile keyword arguments, forwarded to
        ``self.light_2d``
    :param n: number of radii to draw
    :param new_compute: when True, rebuild the cached CDF
    :param r_eff: unused here; kept for interface compatibility
    :return: array of ``n`` random projected radii
    """
    if not hasattr(self, '_light_cdf') or new_compute is True:
        r_array = np.linspace(self._min_interpolate, self._max_interpolate, self._interp_grid_num)
        cum_sum = np.zeros_like(r_array)
        # Running Riemann sum of r * I(r) (constant factors cancel after
        # normalization); the first bin is defined to be zero.
        # NOTE: the original shadowed the builtin `sum` and copy.deepcopy'd
        # a float each step - both removed, numerics unchanged.
        running_total = 0
        for i, r in enumerate(r_array):
            if i > 0:
                running_total += self.light_2d(r, kwargs_list) * r
                cum_sum[i] = running_total
        cum_sum_norm = cum_sum / cum_sum[-1]
        # Invert the CDF: map uniform [0, 1] values back to radii.
        self._light_cdf = interp1d(cum_sum_norm, r_array)
    cdf_draw = np.random.uniform(0., 1, n)
    r_draw = self._light_cdf(cdf_draw)
    return r_draw
|
def insertBefore(self, newchild, refchild):
    """Insert newchild immediately before refchild among this node's
    children and return newchild.

    Raises ValueError if refchild is not a child of this node (matching
    by identity, not equality).
    """
    for index, existing in enumerate(self.childNodes):
        if existing is not refchild:
            continue
        self.childNodes.insert(index, newchild)
        newchild.parentNode = self
        self._verifyChildren(index)
        return newchild
    raise ValueError(refchild)
|
def connect(self, keyfile=None):
    """Connect to the node via ssh using the paramiko library.

    :param keyfile: optional path to a host-keys file to pre-load.
    :return: :py:class:`paramiko.SSHClient` - ssh connection or None on
      failure
    """
    ssh = paramiko.SSHClient()
    # Unknown host keys are accepted automatically (nodes are ephemeral).
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    if keyfile and os.path.exists(keyfile):
        ssh.load_host_keys(keyfile)
    # Try connecting using the `preferred_ip`, if
    # present. Otherwise, try all of them and set `preferred_ip`
    # using the first that is working.
    ips = self.ips[:]
    # This is done in order to "sort" the IPs and put the preferred_ip first.
    if self.preferred_ip:
        if self.preferred_ip in ips:
            ips.remove(self.preferred_ip)
        else:
            # Preferred is changed?
            log.debug("IP %s does not seem to belong to %s anymore. Ignoring!", self.preferred_ip, self.name)
            self.preferred_ip = ips[0]
    for ip in itertools.chain([self.preferred_ip], ips):
        # `preferred_ip` may be unset/empty; skip blank entries.
        if not ip:
            continue
        try:
            log.debug("Trying to connect to host %s (%s)", self.name, ip)
            addr, port = parse_ip_address_and_port(ip, SSH_PORT)
            ssh.connect(str(addr), username=self.image_user, allow_agent=True, key_filename=self.user_key_private, timeout=Node.connection_timeout, port=port)
            log.debug("Connection to %s succeeded on port %d!", ip, port)
            if ip != self.preferred_ip:
                log.debug("Setting `preferred_ip` to %s", ip)
                self.preferred_ip = ip
            # Connection successful.
            return ssh
        except socket.error as ex:
            log.debug("Host %s (%s) not reachable: %s.", self.name, ip, ex)
        except paramiko.BadHostKeyException as ex:
            log.error("Invalid host key: host %s (%s); check keyfile: %s", self.name, ip, keyfile)
        except paramiko.SSHException as ex:
            log.debug("Ignoring error %s connecting to %s", str(ex), self.name)
    # Every candidate IP failed.
    return None
|
def _process_diseases(self, limit=None):
    """This method processes the KEGG disease IDs.

    Triples created:
    <disease_id> is a class
    <disease_id> rdfs:label <disease_name>

    :param limit: maximum number of rows to ingest (None = no limit;
        the limit is ignored while in test mode)
    :return: None
    """
    LOG.info("Processing diseases")
    # In test mode, write into the dedicated test graph instead.
    if self.test_mode:
        graph = self.testgraph
    else:
        graph = self.graph
    line_counter = 0
    model = Model(graph)
    raw = '/'.join((self.rawdir, self.files['disease']['file']))
    with open(raw, 'r', encoding="iso-8859-1") as csvfile:
        filereader = csv.reader(csvfile, delimiter='\t', quotechar='\"')
        for row in filereader:
            line_counter += 1
            (disease_id, disease_name) = row
            disease_id = 'KEGG-' + disease_id.strip()
            # Remember the first label seen for each disease id.
            if disease_id not in self.label_hash:
                self.label_hash[disease_id] = disease_name
            # In test mode only ingest the whitelisted test ids.
            if self.test_mode and disease_id not in self.test_ids['disease']:
                continue
            # Add the disease as a class.
            # we don't get all of these from MONDO yet see:
            # https://github.com/monarch-initiative/human-disease-ontology/issues/3
            model.addClassToGraph(disease_id, disease_name)
            # not typing the diseases as DOID:4 yet because
            # I don't want to bulk up the graph unnecessarily
            if not self.test_mode and (limit is not None and line_counter > limit):
                break
    LOG.info("Done with diseases")
    return
|
def append_text(self, txt):
    """Append *txt* to the end of the file at ``self.fullname``.

    The text is written as-is; no newline is added.
    """
    with open(self.fullname, "a") as handle:
        handle.write(txt)
|
def generic_request(self, method, uri, all_pages=False, data_key=None, no_data=False, do_not_process=False, force_urlencode_data=False, data=None, params=None, files=None, single_item=False):
    """Generic Canvas Request Method.

    Resolves relative URIs, dispatches the HTTP call through the session,
    raises for HTTP errors, then post-processes the response according to
    the flags: ``do_not_process`` -> raw response; ``no_data`` -> status
    code only; ``all_pages`` -> depaginated data; ``single_item`` -> one
    JSON object (unwrapped at ``data_key`` when given); default -> parsed
    JSON body.
    """
    if not uri.startswith('http'):
        uri = self.uri_for(uri)
    if force_urlencode_data is True:
        uri += '?' + urllib.urlencode(data)
    assert method in ['GET', 'POST', 'PUT', 'DELETE', 'HEAD', 'OPTIONS']
    # Dispatch to the matching session call.
    if method == 'POST':
        response = self.session.post(uri, data=data, files=files)
    elif method == 'PUT':
        response = self.session.put(uri, data=data)
    elif method == 'GET':
        response = self.session.get(uri, params=params)
    elif method == 'DELETE':
        response = self.session.delete(uri, params=params)
    elif method == 'HEAD':
        response = self.session.head(uri, params=params)
    else:  # OPTIONS
        response = self.session.options(uri, params=params)
    response.raise_for_status()
    if do_not_process is True:
        return response
    if no_data:
        return response.status_code
    if all_pages:
        return self.depaginate(response, data_key)
    if single_item:
        payload = response.json()
        return payload[data_key] if data_key else payload
    return response.json()
|
def delete_collection_csi_driver(self, **kwargs):
    """delete collection of CSIDriver

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, pass async_req=True:

    >>> thread = api.delete_collection_csi_driver(async_req=True)
    >>> result = thread.get()

    Optional keyword arguments forwarded to the API: async_req (bool),
    pretty (str), _continue (str), field_selector (str),
    label_selector (str), limit (int), resource_version (str),
    timeout_seconds (int) and watch (bool) -- see the Kubernetes
    list/delete-collection API documentation for their semantics.

    :return: V1Status, or the request thread when called asynchronously.
    """
    # Callers of this wrapper expect just the deserialized body, not the
    # (data, status_code, headers) tuple.
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        return self.delete_collection_csi_driver_with_http_info(**kwargs)
    (data) = self.delete_collection_csi_driver_with_http_info(**kwargs)
    return data
|
def run_rep(self, params, rep):
    """run a single repetition including directory creation, log files, etc.

    :param params: experiment parameter dict; uses 'name', 'path' and
        'iterations'
    :param rep: repetition index; the log file is named '<rep>.log'
    :return: False when the repetition is already complete, else None

    The log file holds one JSON object per iteration, so its line count
    doubles as the progress marker for restarts.
    """
    try:
        name = params['name']
        fullpath = os.path.join(params['path'], params['name'])
        logname = os.path.join(fullpath, '%i.log' % rep)
        # check if repetition exists and has been completed
        restore = 0
        if os.path.exists(logname):
            logfile = open(logname, 'r')
            lines = logfile.readlines()
            logfile.close()
            # if completed, continue loop
            if 'iterations' in params and len(lines) == params['iterations']:
                return False
            # if not completed, check if restore_state is supported
            if not self.restore_supported:
                # not supported, delete repetition and start over
                # print 'restore not supported, deleting %s' % logname
                os.remove(logname)
                restore = 0
            else:
                restore = len(lines)
        self.reset(params, rep)
        if restore:
            # Resume: append to the existing log and restore run state.
            logfile = open(logname, 'a')
            self.restore_state(params, rep, restore)
        else:
            logfile = open(logname, 'w')
        # loop through iterations and call iterate
        for it in xrange(restore, params['iterations']):
            # iterate() may return None; always record the iteration number.
            dic = self.iterate(params, rep, it) or {}
            dic['iteration'] = it
            if self.restore_supported:
                self.save_state(params, rep, it)
            if dic is not None:
                # One JSON object per line so restarts can count lines.
                json.dump(dic, logfile)
                logfile.write('\n')
                logfile.flush()
        logfile.close()
        self.finalize(params, rep)
    except:
        import traceback
        traceback.print_exc()
        raise
|
def destroy(vm_name, call=None):
    '''Call 'destroy' on the instance. Can be called with "-a destroy" or -d

    CLI Example:

    .. code-block:: bash

        salt-cloud -a destroy myinstance1 myinstance2 ...
        salt-cloud -d myinstance1 myinstance2 ...
    '''
    if call and call != 'action':
        raise SaltCloudSystemExit('The destroy action must be called with -d or "-a destroy".')
    conn = get_conn()
    try:
        node = conn.ex_get_node(vm_name)
    except Exception as exc:  # pylint: disable=W0703
        log.error('Could not locate instance %s\n\n' 'The following exception was thrown by libcloud when trying to ' 'run the initial deployment: \n%s', vm_name, exc, exc_info_on_loglevel=logging.DEBUG)
        raise SaltCloudSystemExit('Could not find instance {0}.'.format(vm_name))
    # Fire the "deleting" event before touching the instance.
    __utils__['cloud.fire_event']('event', 'delete instance', 'salt/cloud/{0}/deleting'.format(vm_name), args={'name': vm_name}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'])
    # Use the instance metadata to see if its salt cloud profile was
    # preserved during instance create. If so, use the profile value
    # to see if the 'delete_boot_pd' value is set to delete the disk
    # along with the instance.
    profile = None
    if node.extra['metadata'] and 'items' in node.extra['metadata']:
        for md in node.extra['metadata']['items']:
            if md['key'] == 'salt-cloud-profile':
                profile = md['value']
    vm_ = get_configured_provider()
    delete_boot_pd = False
    if profile and profile in vm_['profiles'] and 'delete_boot_pd' in vm_['profiles'][profile]:
        delete_boot_pd = vm_['profiles'][profile]['delete_boot_pd']
    try:
        inst_deleted = conn.destroy_node(node)
    except Exception as exc:  # pylint: disable=W0703
        log.error('Could not destroy instance %s\n\n' 'The following exception was thrown by libcloud when trying to ' 'run the initial deployment: \n%s', vm_name, exc, exc_info_on_loglevel=logging.DEBUG)
        raise SaltCloudSystemExit('Could not destroy instance {0}.'.format(vm_name))
    __utils__['cloud.fire_event']('event', 'delete instance', 'salt/cloud/{0}/deleted'.format(vm_name), args={'name': vm_name}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'])
    if delete_boot_pd:
        # The profile asked for the boot persistent disk to be removed too.
        log.info('delete_boot_pd is enabled for the instance profile, ' 'attempting to delete disk')
        __utils__['cloud.fire_event']('event', 'delete disk', 'salt/cloud/disk/deleting', args={'name': vm_name}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'])
        try:
            conn.destroy_volume(conn.ex_get_volume(vm_name))
        except Exception as exc:  # pylint: disable=W0703
            # Note that we don't raise a SaltCloudSystemExit here in order
            # to allow completion of instance deletion. Just log the error
            # and keep going.
            log.error('Could not destroy disk %s\n\n' 'The following exception was thrown by libcloud when trying ' 'to run the initial deployment: \n%s', vm_name, exc, exc_info_on_loglevel=logging.DEBUG)
        __utils__['cloud.fire_event']('event', 'deleted disk', 'salt/cloud/disk/deleted', args={'name': vm_name}, sock_dir=__opts__['sock_dir'], transport=__opts__['transport'])
    if __opts__.get('update_cachedir', False) is True:
        __utils__['cloud.delete_minion_cachedir'](vm_name, __active_provider_name__.split(':')[0], __opts__)
    return inst_deleted
|
def get_word_break_property(value, is_bytes=False):
    """Get `WORD BREAK` property."""
    table = unidata.ascii_word_break if is_bytes else unidata.unicode_word_break
    aliases = unidata.unicode_alias['wordbreak']
    if value.startswith('^'):
        # Negated form: resolve the alias of the name following '^'.
        name = value[1:]
        key = '^' + aliases.get(name, name)
    else:
        key = aliases.get(value, value)
    return table[key]
|
def dbStore ( self , typ , py_value ) :
"""Prepares to store this column for the a particular backend database .
: param backend : < orb . Database >
: param py _ value : < variant >
: param context : < orb . Context >
: return : < variant >"""
|
# convert base types to work in the database
if isinstance ( py_value , ( list , tuple , set ) ) :
py_value = tuple ( ( self . dbStore ( x ) for x in py_value ) )
elif isinstance ( py_value , orb . Collection ) :
py_value = py_value . ids ( )
elif isinstance ( py_value , orb . Model ) :
py_value = py_value . id ( )
return py_value
|
def move(self, x, y):
    """Move window top-left corner to position.

    :param x: new screen X of the top-left corner
    :param y: new screen Y of the top-left corner
    """
    # SWP_NOSIZE keeps the current width/height, so the 0, 0 size
    # arguments are ignored by SetWindowPos.
    SetWindowPos(self._hwnd, None, x, y, 0, 0, SWP_NOSIZE)
|
def tune(self, verbose=None):
    """Adapt the initial slice width ``w`` from the observed jump sizes.

    Returns False when tuning is disabled; otherwise records the latest
    absolute jump, sets ``w`` to twice the mean absolute jump seen so
    far, and returns True.
    """
    if not self._tune:
        return False
    jump = abs(self.stochastic.last_value - self.stochastic.value)
    self.w_tune.append(jump)
    self.w = 2 * (sum(self.w_tune) / len(self.w_tune))
    return True
|
def from_(self, selectable):
    """Add a table (or subquery) to the query's FROM clause.

    :param selectable:
        Type: ``Table``, ``Query``, or ``str``
        When a ``str`` is passed, a table with the name matching the
        ``str`` value is used.
    """
    table = Table(selectable) if isinstance(selectable, str) else selectable
    self._from.append(table)
    # Auto-alias anonymous subqueries as sq0, sq1, ..., keeping the
    # counter monotonic across nesting levels.
    if isinstance(selectable, (QueryBuilder, _UnionQuery)) and selectable.alias is None:
        if isinstance(selectable, QueryBuilder):
            nested_count = selectable._subquery_count
        else:
            nested_count = 0
        nested_count = max(self._subquery_count, nested_count)
        selectable.alias = 'sq%d' % nested_count
        self._subquery_count = nested_count + 1
|
def skip_count(self):
    """Amount of skipped test cases in this list.

    :return: integer count of results whose ``skip`` attribute is truthy
    """
    # sum() over a generator avoids building an intermediate list and the
    # unused enumerate() index of the previous implementation.
    return sum(1 for result in self.data if result.skip)
|
def close(self):
    """Shutdown and free all resources.

    The controller is quit before the process is closed; both handles are
    cleared afterwards, making repeated calls a no-op.
    """
    if self._controller is not None:
        self._controller.quit()
        self._controller = None
    if self._process is not None:
        self._process.close()
        self._process = None
|
def cov_from_scales(self, scales):
    """Build a diagonal covariance matrix from a dictionary of scales.

    ``scales`` is keyed by stochastic instances; each value is the jump
    variance for that stochastic (sequence-valued stochastics need one
    variance per element). Raises ValueError when the flattened scales do
    not match the sampler dimension ``self.dim``.
    """
    # Flatten the per-stochastic scales in sampler order.
    ordered = np.concatenate([np.ravel(scales[s]) for s in self.stochastics])
    if np.squeeze(ordered).shape[0] != self.dim:
        raise ValueError("Improper initial scales, dimension don't match",
                         (np.squeeze(ordered), self.dim))
    # Scale the identity matrix column-wise -> diagonal covariance matrix.
    return np.eye(self.dim) * ordered
|
def concentric_hexagons(radius, start=(0, 0)):
    """Generate coordinates of concentric rings of hexagons.

    Parameters
    ----------
    radius : int
        Number of layers to produce (0 is just one hexagon).
    start : (x, y)
        The coordinate of the central hexagon.
    """
    # Step directions walking the perimeter of one hexagonal ring.
    perimeter_steps = ((1, 1), (0, 1), (-1, 0), (-1, -1), (0, -1), (1, 0))
    x, y = start
    yield (x, y)
    for ring in range(1, radius + 1):
        # Drop down one cell to the start of the next ring.
        y -= 1
        for dx, dy in perimeter_steps:
            for _ in range(ring):
                yield (x, y)
                x += dx
                y += dy
|
def _reaction_representer(dumper, data):
    """Emit a parsable YAML representation of a reaction.

    Reactions with at most _MAX_REACTION_LENGTH compounds are serialized
    as their text form; longer ones are expanded into an ordered mapping
    with explicit left/right compound lists so the output stays readable.
    """
    if len(data.compounds) <= _MAX_REACTION_LENGTH:
        return _represent_text_type(dumper, text_type(data))

    def compound_dicts(compounds):
        return [
            OrderedDict([
                ('id', text_type(compound.name)),
                ('compartment', compound.compartment),
                ('value', value),
            ])
            for compound, value in compounds
        ]

    left = compound_dicts(data.left)
    right = compound_dicts(data.right)
    reaction = OrderedDict()
    reaction['reversible'] = data.direction == Direction.Both
    if data.direction == Direction.Reverse:
        # A reverse-only reaction is written flipped so it reads forward.
        left, right = right, left
    reaction['left'] = left
    reaction['right'] = right
    return dumper.represent_data(reaction)
|
def get_message(self, set_slave_ok, is_mongos, use_cmd=False):
    """Get a query message, possibly setting the slaveOk bit."""
    # Bit 2 (value 4) of the OP_QUERY flags is slaveOk.
    flags = self.flags | 4 if set_slave_ok else self.flags
    if use_cmd:
        # Commands run against <db>.$cmd and always return one document.
        ns = _UJOIN % (self.db, "$cmd")
        spec = self.as_command()[0]
        ntoreturn = -1
    else:
        ns = _UJOIN % (self.db, self.coll)
        spec = self.spec
        # OP_QUERY treats ntoreturn of -1 and 1 the same (return one
        # document and close the cursor), so a batch size of 1 must be
        # sent as 2.
        ntoreturn = 2 if self.batch_size == 1 else self.batch_size
        if self.limit:
            ntoreturn = min(self.limit, ntoreturn) if ntoreturn else self.limit
    if is_mongos:
        spec = _maybe_add_read_preference(spec, self.read_preference)
    return query(flags, ns, self.ntoskip, ntoreturn, spec, self.fields, self.codec_options)
|
def add_formdata(self, content=None):
    # type: (Optional[Dict[str, str]]) -> None
    """Attach form data to the request.

    Fields whose value is None are dropped. When the current Content-Type
    header is application/x-www-form-urlencoded, the fields are stored on
    ``self.data``; otherwise they are treated as multipart/form-data and
    stored on ``self.files``. The Content-Type header itself is removed
    either way. The request is not yet streamed.

    :param dict content: Dictionary of the fields of the formdata.
    """
    if content is None:
        content = {}
    content_type = self.headers.pop('Content-Type', None) if self.headers else None
    fields = {name: value for name, value in content.items() if value is not None}
    if content_type and content_type.lower() == 'application/x-www-form-urlencoded':
        # Do NOT use "add_content", which assumes the input is JSON.
        self.data = fields
    else:
        # Assume "multipart/form-data".
        self.files = {name: self._format_data(value) for name, value in fields.items()}
|
def await_reservations(self, sc, status=None, timeout=600):
    """Block until all reservations are received.

    Polls once per second until ``self.reservations`` reports done.  If the
    shared ``status`` dict ever contains an ``'error'`` key, all Spark jobs
    are cancelled, the context is stopped and the process exits.

    :param sc: SparkContext, used to cancel jobs / stop on error.
    :param status: optional dict of status flags shared with worker code;
        the presence of an ``'error'`` key aborts the wait.
    :param timeout: maximum number of seconds to wait.
    :return: the completed reservations (``self.reservations.get()``).
    :raises Exception: if reservations do not complete within ``timeout``.
    """
    # Fix: the default used to be a mutable dict ({}), shared across calls;
    # use a None sentinel instead.
    if status is None:
        status = {}
    timespent = 0
    while not self.reservations.done():
        logging.info("waiting for {0} reservations".format(self.reservations.remaining()))
        # Check status flags for any errors reported by workers.
        if 'error' in status:
            sc.cancelAllJobs()
            sc.stop()
            sys.exit(1)
        time.sleep(1)
        timespent += 1
        if timespent > timeout:
            raise Exception("timed out waiting for reservations to complete")
    logging.info("all reservations completed")
    return self.reservations.get()
|
def headercontent(self, method):
    """Build the XML content for the SOAP I{Header} node.

    @param method: A service method.
    @type method: I{service.Method}
    @return: The XML content for the <body/>.
    @rtype: [L{Element},...]
    """
    nodes = []
    security = self.options().wsse
    if security is not None:
        nodes.append(security.xml())
    soapheaders = self.options().soapheaders
    if not isinstance(soapheaders, (tuple, list, dict)):
        # A single bare value: wrap it so it pairs with the first part type.
        soapheaders = (soapheaders,)
    elif not soapheaders:
        # Empty container: nothing more to add.
        return nodes
    part_types = self.headpart_types(method)
    if isinstance(soapheaders, dict):
        # Keyed headers: match each declared part type by name.
        for part in part_types:
            value = soapheaders.get(part[0])
            if value is None:
                continue
            node = self.mkheader(method, part, value)
            prefix, uri = part[1].namespace("ns0")
            node.setPrefix(prefix, uri)
            nodes.append(node)
    else:
        # Positional headers: consume part types in order.  Raw Elements are
        # copied through without consuming a part type; a plain value past
        # the last part type stops the scan.
        consumed = 0
        for value in soapheaders:
            if isinstance(value, Element):
                nodes.append(deepcopy(value))
            elif consumed < len(part_types):
                part = part_types[consumed]
                node = self.mkheader(method, part, value)
                prefix, uri = part[1].namespace("ns0")
                node.setPrefix(prefix, uri)
                nodes.append(node)
                consumed += 1
            else:
                break
    return nodes
|
def set_chime(self, sound, cycles=None):
    """Activate the device chime with the given sound.

    :param sound: a str, one of ["doorbell", "fur_elise", "doorbell_extended",
        "alert", "william_tell", "rondo_alla_turca", "police_siren",
        "evacuation", "beep_beep", "beep", "inactive"]
    :param cycles: undocumented; seems to have no effect?
    :return: nothing
    """
    state = {"activate_chime": sound}
    if cycles is not None:
        state["chime_cycles"] = cycles
    response = self.api_interface.set_device_state(self, {"desired_state": state})
    self._update_state_from_response(response)
|
def _set_factory_context(factory_class, bundle_context):
    # type: (type, Optional[BundleContext]) -> Optional[FactoryContext]
    """Convert a manipulated class' context data into its FactoryContext form.

    :param factory_class: A manipulated class
    :param bundle_context: The class bundle context
    :return: The factory context, None on error
    """
    try:
        # The factory context is stored on the class by the decorators.
        factory_context = getattr(factory_class, constants.IPOPO_FACTORY_CONTEXT)
    except AttributeError:
        # The class has not been manipulated, or badly so.
        return None
    if not factory_context.completed:
        # Partial context: the class was not fully manipulated.
        return None
    # Bind the factory to its bundle context.
    factory_context.set_bundle_context(bundle_context)
    return factory_context
|
def execute_on_entries(self, entry_processor, predicate=None):
    """Apply the user-defined EntryProcessor to every entry in the map, or
    only to the entries satisfying ``predicate`` when one is provided.
    Returns the results mapped by each key in the map.

    :param entry_processor: (object), a stateful serializable object
        representing the EntryProcessor defined on the server side.  A
        counterpart implementing ``org.hazelcast.map.EntryProcessor`` must be
        registered on the server.
    :param predicate: (Predicate), predicate for filtering the entries
        (optional).
    :return: (Sequence), list of map entries pairing each key with the result
        of the entry process.

    .. seealso:: :class:`~hazelcast.serialization.predicate.Predicate` for
        more info about predicates.
    """
    if not predicate:
        # No (truthy) predicate: process every key in the map.
        return self._encode_invoke(map_execute_on_all_keys_codec, entry_processor=self._to_data(entry_processor))
    return self._encode_invoke(map_execute_with_predicate_codec, entry_processor=self._to_data(entry_processor), predicate=self._to_data(predicate))
|
def add_send_message(self, connection, send_message):
    """Register a send_message callback in the Dispatcher's per-connection
    function table.

    Args:
        connection (str): A locally unique identifier provided by the
            receiver of messages.
        send_message (fn): The method the dispatcher should call to respond
            to messages which arrive via ``connection``.
    """
    self._send_message[connection] = send_message
    LOGGER.debug("Added send_message function " "for connection %s", connection)
|
def read_gaf(fin_gaf, prt=sys.stdout, hdr_only=False, allow_missing_symbol=False, **kws):
    """Read Gene Association File (GAF). Return data."""
    reader = GafReader(fin_gaf, hdr_only, prt=prt, allow_missing_symbol=allow_missing_symbol)
    return reader.get_id2gos(**kws)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.