| signature | implementation |
|---|---|
def retrieve_all(self, sids, default_none=False):
    """Retrieve all assets in `sids`.

    Parameters
    ----------
    sids : iterable of int
        Assets to retrieve.
    default_none : bool
        If True, return None for failed lookups.
        If False, raise `SidsNotFound`.

    Returns
    -------
    assets : list[Asset or None]
        A list of the same length as `sids` containing Assets (or Nones)
        corresponding to the requested sids.

    Raises
    ------
    SidsNotFound
        When a requested sid is not found and default_none=False.
    """ |
    sids = list(sids)
    hits, missing, failures = {}, set(), []
    for sid in sids:
        try:
            asset = self._asset_cache[sid]
            if not default_none and asset is None:
                # Bail early if we've already cached that we don't know
                # about an asset.
                raise SidsNotFound(sids=[sid])
            hits[sid] = asset
        except KeyError:
            missing.add(sid)
    # All requests were cache hits. Return requested sids in order.
    if not missing:
        return [hits[sid] for sid in sids]
    update_hits = hits.update
    # Look up cache misses by type.
    type_to_assets = self.group_by_type(missing)
    # Handle failures.
    failures = {failure: None for failure in type_to_assets.pop(None, ())}
    update_hits(failures)
    self._asset_cache.update(failures)
    if failures and not default_none:
        raise SidsNotFound(sids=list(failures))
    # We don't update the asset cache here because it should already be
    # updated by `self.retrieve_equities`.
    update_hits(self.retrieve_equities(type_to_assets.pop('equity', ())))
    update_hits(self.retrieve_futures_contracts(type_to_assets.pop('future', ())))
    # We shouldn't know about any other asset types.
    if type_to_assets:
        raise AssertionError("Found asset types: %s" % list(type_to_assets.keys()))
    return [hits[sid] for sid in sids] |
def recycle():
    """The purpose of this task is to recycle the data from the cache
    with version=2 into the main cache.""" |
    # http://niwinz.github.io/django-redis/latest/#_scan_delete_keys_in_bulk
    for service in cache.iter_keys('th_*'):
        try:
            # get the value from the cache version=2
            service_value = cache.get(service, version=2)
            # put it in version=1
            cache.set(service, service_value)
            # remove version=2
            cache.delete_pattern(service, version=2)
        except ValueError:
            pass
    logger.info('recycle of cache done!') |
def add_wic_ports(self, wic_slot):
    """Add the ports for a specific WIC to the node['ports'] dictionary.

    :param str wic_slot: WIC slot (wic0)
    """ |
    wic_slot_number = int(wic_slot[3])
    wic_adapter = self.node['properties'][wic_slot]
    num_ports = ADAPTER_MATRIX[wic_adapter]['ports']
    port_type = ADAPTER_MATRIX[wic_adapter]['type']
    ports = []
    # Dynamips WIC port numbers start on a multiple of 16.
    base = 16 * (wic_slot_number + 1)
    # WICs are always in adapter slot 0.
    slot = 0
    for port_number in range(num_ports):
        phy_port_number = port_number + self.port_numbering[port_type]
        port_name = PORT_TYPES[port_type] + '%s/%s' % (slot, phy_port_number)
        port_temp = {'name': port_name,
                     'id': self.port_id,
                     'port_number': base + port_number,
                     'slot_number': slot}
        ports.append(port_temp)
        self.port_id += 1
    self.port_numbering[port_type] += num_ports
    self.node['ports'].extend(ports) |
def glob(self, pathname, using=None, unite=False, basecolumn=0, parser=None,
         with_filename=False, recursive=False, natsort=True, **kwargs):
    """Load data from files matched by the given glob pattern.

    The return value will be a list of data unless :attr:`unite` is `True`.
    If :attr:`unite` is `True`, all datasets will be united into a single
    dataset.

    Parameters
    ----------
    pathname : string
        A glob pattern.
    using : list of integer, slice instance, or None, optional
        A list of indexes or a slice instance used to slice data into
        columns. If it is not specified, :attr:`using` specified in the
        constructor will be used instead.
    unite : boolean, optional
        If it is `True`, the datasets will be united into a single numpy
        array. See usage for more detail.
    basecolumn : integer, optional
        An index of the base column. All data will be trimmed based on the
        order of this column when the number of samples differs among the
        datasets. It only has an effect when :attr:`unite` is `True`.
    parser : instance, optional
        An instance or registered name of a parser class.
        If it is not specified, :attr:`parser` specified in the constructor
        will be used instead.
    with_filename : boolean, optional
        If it is `True`, the returned dataset will contain the filename in
        the first column. It cannot be used with :attr:`unite=True`.
    recursive : boolean, optional
        Recursively find the pattern in the directory.
    natsort : boolean
        Naturally sort the found files.

    Returns
    -------
    ndarray
        A list of numpy arrays.
    """ |
    # argument check
    if unite and with_filename:
        raise AttributeError("`with_filename` attribute cannot be set True when "
                             "`unite` attribute was set True.")
    # make sure that the pathname is absolute
    pathname = os.path.abspath(pathname)
    if recursive:
        filelist = rglob(pathname)
    else:
        filelist = glob(pathname)
    if natsort:
        filelist = natsorted(filelist, number_type=None)
    # create dataset
    dataset = []
    for filename in filelist:
        data = self.load(filename=filename, using=using, parser=parser, **kwargs)
        if with_filename:
            data = [filename] + data
        dataset.append(data)
    # tell the number of files found if verbose is True
    if kwargs.get('verbose', False):
        print("%d files are found with `%s`" % (len(dataset), os.path.relpath(pathname)))
    # warn if nothing was found, unless quiet is True
    if len(dataset) == 0 and not kwargs.get('quiet', False):
        warnings.warn("Nothing found with glob pattern '%s'" % pathname)
    # unite dataset if specified
    if unite and len(dataset) > 0:
        dataset = unite_dataset(dataset, basecolumn)
    return dataset |
def incoming_edges(self, node):
    """Returns a ``tuple`` of incoming edges for a **node object**.

    Arguments:
        - node (``object``): **node object** present in the graph, to be
          queried for incoming edges.
    """ |
    edges = self.edges()
    in_edges = []
    for out_node, in_node in edges:
        if node is in_node:
            in_edges.append((out_node, in_node))
    return tuple(in_edges) |
def build_mock_open_side_effect(string_d, stream_d):
    """Build a mock open side effect using a dictionary of content for the files.

    :param string_d: keys are file names, values are string file contents
    :param stream_d: keys are file names, values are streams of contents
    """ |
    assert len(set(string_d.keys()).intersection(set(stream_d.keys()))) == 0

    def mock_open_side_effect(*args, **kwargs):
        if args[0] in string_d:
            return StringIO.StringIO(string_d[args[0]])
        elif args[0] in stream_d:
            return stream_d[args[0]]
        else:
            raise IOError("No such file: " + args[0])

    return mock_open_side_effect |
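
A minimal usage sketch (hypothetical file names; Python 2, matching the `StringIO.StringIO` call above): the side effect is handed to `mock.patch`, which forwards extra keyword arguments to the created mock.

```python
import mock

side_effect = build_mock_open_side_effect(
    {'config.txt': 'key=value'},  # string-backed files
    {},                           # no stream-backed files
)
with mock.patch('__builtin__.open', side_effect=side_effect):
    assert open('config.txt').read() == 'key=value'
```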
def _compute_equations(self, x, verbose=False):
    '''Compute the values and the normals (gradients) of active constraints.

    Arguments:
     | ``x`` -- The unknowns.
    ''' |
    # compute the error and the normals
    normals = []
    values = []
    signs = []
    error = 0.0
    if verbose:
        print()
        print(' '.join('% 10.3e' % val for val in x), end=' ')
        active_str = ''
    for i, (sign, equation) in enumerate(self.equations):
        value, normal = equation(x)
        if ((i < len(self.lock) and self.lock[i]) or
                (sign == -1 and value > -self.threshold) or
                (sign == 0) or
                (sign == 1 and value < self.threshold)):
            values.append(value)
            normals.append(normal)
            signs.append(sign)
            error += value ** 2
            if verbose:
                active_str += 'X'
            if i < len(self.lock):
                self.lock[i] = True
        elif verbose:
            active_str += '-'
    error = np.sqrt(error)
    normals = np.array(normals, float)
    values = np.array(values, float)
    signs = np.array(signs, int)
    if verbose:
        print('[%s]' % active_str, end=' ')
        if error < self.threshold:
            print('OK')
        else:
            print('%.5e' % error)
    return normals, values, error, signs |
def run_query_series(queries, conn):
    """Iterates through a list of queries and runs them through the connection.

    Args:
        queries: list of strings or tuples containing (query_string, kwargs)
        conn: the triplestore connection to use
    """ |
    results = []
    for item in queries:
        qry = item
        kwargs = {}
        if isinstance(item, tuple):
            qry = item[0]
            kwargs = item[1]
        result = conn.update_query(qry, **kwargs)
        # pdb.set_trace()
        results.append(result)
    return results |
def make_thumbnail_name(image_name, extension):
    """Return the name of the downloaded thumbnail, based on the extension.""" |
    file_name, _ = os.path.splitext(image_name)
    return file_name + '.' + clean_extension(extension) |
def download_sample(job, ids, input_args, sample):
    """Defines variables unique to a sample that are used in the rest of the pipeline.

    ids: dict          Dictionary of fileStore IDs
    input_args: dict   Dictionary of input arguments
    sample: tuple      Contains uuid and sample_url
    """ |
    if len(sample) == 2:
        uuid, sample_location = sample
        url1, url2 = None, None
    else:
        uuid, url1, url2 = sample
        sample_location = None
    # Update values unique to the sample
    sample_input = dict(input_args)
    sample_input['uuid'] = uuid
    sample_input['sample.tar'] = sample_location
    if sample_input['output_dir']:
        sample_input['output_dir'] = os.path.join(input_args['output_dir'], uuid)
    sample_input['cpu_count'] = multiprocessing.cpu_count()
    job_vars = (sample_input, ids)
    # Download or locate the local file and place it in the jobStore
    if sample_input['input']:
        ids['sample.tar'] = job.fileStore.writeGlobalFile(os.path.abspath(sample_location))
    elif sample_input['config_fastq']:
        ids['R1.fastq'] = job.fileStore.writeGlobalFile(urlparse(url1).path)
        ids['R2.fastq'] = job.fileStore.writeGlobalFile(urlparse(url2).path)
    else:
        if sample_input['ssec']:
            ids['sample.tar'] = job.addChildJobFn(download_encrypted_file, sample_input,
                                                  'sample.tar', disk='25G').rv()
        else:
            ids['sample.tar'] = job.addChildJobFn(download_from_url, sample_input['sample.tar'],
                                                  disk='25G').rv()
    job.addFollowOnJobFn(static_dag_launchpoint, job_vars) |
def build(self, **values: Any) -> str:
    """Build this rule into a path using the values given.""" |
    converted_values = {
        key: self._converters[key].to_url(value)
        for key, value in values.items()
        if key in self._converters
    }
    result = self._builder.format(**converted_values).split('|', 1)[1]
    query_string = urlencode(
        {
            key: value
            for key, value in values.items()
            if key not in self._converters and key not in self.defaults
        },
        doseq=True,
    )
    if query_string:
        result = "{}?{}".format(result, query_string)
    return result |
def need_geocoding(self):
    """Returns True if any of the required address components is missing.""" |
    need_geocoding = False
    for attribute, component in self.required_address_components.items():
        if not getattr(self, attribute):
            need_geocoding = True
            break  # skip extra loops
    return need_geocoding |
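
The loop with its early `break` is the hand-rolled form of a short-circuiting `any()`; a sketch of the equivalent one-liner:

```python
def need_geocoding(self):
    # any() stops at the first missing attribute, just like the break above
    return any(not getattr(self, attribute)
               for attribute in self.required_address_components)
```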
def IterEnumerateInstancePaths(self, ClassName, namespace=None,
                               FilterQueryLanguage=None, FilterQuery=None,
                               OperationTimeout=None, ContinueOnError=None,
                               MaxObjectCount=DEFAULT_ITER_MAXOBJECTCOUNT,
                               **extra):
    """Enumerate the instance paths of instances of a class (including
    instances of its subclasses) in a namespace, using the Python
    :term:`py:generator` idiom to return the result.

    *New in pywbem 0.10 as experimental and finalized in 0.12.*

    This method uses the corresponding pull operations if supported by the
    WBEM server, or otherwise the corresponding traditional operation.
    It is an alternative to using the pull operations directly that frees
    the user from having to know whether the WBEM server supports pull
    operations.

    This method is a generator function that retrieves instance paths from
    the WBEM server and returns them one by one (using :keyword:`yield`)
    when the caller iterates through the returned generator object. The
    number of instance paths that are retrieved from the WBEM server in one
    request (and thus need to be materialized in this method) is up to the
    `MaxObjectCount` parameter if the corresponding pull operations are
    used, or the complete result set all at once if the corresponding
    traditional operation is used.

    By default, this method attempts to perform the corresponding pull
    operations
    (:meth:`~pywbem.WBEMConnection.OpenEnumerateInstancePaths` and
    :meth:`~pywbem.WBEMConnection.PullInstancePaths`).
    If these pull operations are not supported by the WBEM server, this
    method falls back to using the corresponding traditional operation
    (:meth:`~pywbem.WBEMConnection.EnumerateInstanceNames`).

    Whether the WBEM server supports these pull operations is remembered
    in the :class:`~pywbem.WBEMConnection` object (by operation type), which
    avoids unnecessary attempts to try these pull operations on that
    connection in the future.

    The `use_pull_operations` init parameter of
    :class:`~pywbem.WBEMConnection` can be used to control the preference
    for always using pull operations, always using traditional operations,
    or using pull operations if supported by the WBEM server (the default).

    This method provides all of the controls of the corresponding pull
    operations except for the ability to set different response sizes on
    each request; the response size (defined by the `MaxObjectCount`
    parameter) is the same for all pull operations in the enumeration
    session.

    In addition, some functionality is only available if the corresponding
    pull operations are used by this method:

    * Filtering is not supported for the corresponding traditional
      operation, so setting the `FilterQuery` or `FilterQueryLanguage`
      parameters will be rejected if the corresponding traditional
      operation is used by this method.
      Note that this limitation is not a disadvantage compared to using the
      corresponding pull operations directly, because in both cases the
      WBEM server must support the pull operations and their filtering
      capability in order for the filtering to work.

    * Setting the `ContinueOnError` parameter to `True` will be rejected if
      the corresponding traditional operation is used by this method.

    The enumeration session that is opened with the WBEM server when using
    pull operations is closed automatically when the returned generator
    object is exhausted, or when the generator object is closed using its
    :meth:`~py:generator.close` method (which may also be called before the
    generator is exhausted).

    Parameters:

        ClassName (:term:`string` or :class:`~pywbem.CIMClassName`):
            Name of the class to be enumerated (case independent).
            If specified as a :class:`~pywbem.CIMClassName` object, its
            `namespace` attribute will be used as a default namespace as
            described for the `namespace` parameter, and its `host`
            attribute will be ignored.

        namespace (:term:`string`):
            Name of the CIM namespace to be used (case independent).
            Leading and trailing slash characters will be stripped. The
            lexical case will be preserved.
            If `None`, the namespace of the `ClassName` parameter will be
            used, if specified as a :class:`~pywbem.CIMClassName` object.
            If that is also `None`, the default namespace of the connection
            will be used.

        FilterQueryLanguage (:term:`string`):
            The name of the filter query language used for the `FilterQuery`
            parameter. The DMTF-defined Filter Query Language (see
            :term:`DSP0212`) is specified as "DMTF:FQL".
            If this parameter is not `None` and the traditional operation is
            used by this method, :exc:`~py:exceptions.ValueError` will be
            raised.
            Not all WBEM servers support filtering for this operation,
            because it returns instance paths and the act of the server
            filtering requires that it generate instances just for that
            purpose and then discard them.

        FilterQuery (:term:`string`):
            The filter query in the query language defined by the
            `FilterQueryLanguage` parameter.
            If this parameter is not `None` and the traditional operation is
            used by this method, :exc:`~py:exceptions.ValueError` will be
            raised.

        OperationTimeout (:class:`~pywbem.Uint32`):
            Minimum time in seconds the WBEM Server shall maintain an open
            enumeration session after a previous Open or Pull request is
            sent to the client. Once this timeout time has expired, the
            WBEM server may close the enumeration session.

            * If not `None`, this parameter is sent to the WBEM server as
              the proposed timeout for the enumeration session. A value of 0
              indicates that the server is expected to never time out. The
              server may reject the proposed value, causing a
              :class:`~pywbem.CIMError` to be raised with status code
              :attr:`~pywbem.CIM_ERR_INVALID_OPERATION_TIMEOUT`.
            * If `None`, this parameter is not passed to the WBEM server,
              and causes the server-implemented default timeout to be used.

        ContinueOnError (:class:`py:bool`):
            Indicates to the WBEM server to continue sending responses
            after an error response has been sent.

            * If `True`, the server is to continue sending responses after
              sending an error response. Not all servers support
              continuation on error; a server that does not support it must
              send an error response if `True` was specified, causing
              :class:`~pywbem.CIMError` to be raised with status code
              :attr:`~pywbem.CIM_ERR_CONTINUATION_ON_ERROR_NOT_SUPPORTED`.
              If the corresponding traditional operation is used by this
              method, :exc:`~py:exceptions.ValueError` will be raised.
            * If `False`, the server is requested to close the enumeration
              after sending an error response.
            * If `None`, this parameter is not passed to the WBEM server,
              and causes the server-implemented default behaviour to be
              used. :term:`DSP0200` defines that the server-implemented
              default is `False`.

        MaxObjectCount (:class:`~pywbem.Uint32`):
            Maximum number of instances the WBEM server may return for each
            of the open and pull requests issued during the iterations over
            the returned generator object.

            * If positive, the WBEM server is to return no more than the
              specified number of instance paths.
            * Zero is not allowed; it would mean that zero paths are to be
              returned for every request issued.
            * The default is defined as a system config variable.
            * `None` is not allowed.

            The choice of MaxObjectCount is client/server dependent, but
            choices between 100 and 1000 typically do not have a significant
            impact on either memory or overall efficiency.

        **extra:
            Additional keyword arguments are passed as additional operation
            parameters to the WBEM server.
            Note that :term:`DSP0200` does not define any additional
            parameters for this operation.

    Raises:
        Exceptions described in :class:`~pywbem.WBEMConnection`.

    Returns:
        :term:`py:generator` iterating :class:`~pywbem.CIMInstanceName`:
        A generator object that iterates the resulting CIM instance paths.
        These instance paths have their host and namespace components set.

    Example::

        paths_generator = conn.IterEnumerateInstancePaths('CIM_Blah')
        for path in paths_generator:
            print('path {0}'.format(path))
    """ |
    _validateIterCommonParams(MaxObjectCount, OperationTimeout)
    # Common variable for the pull result tuple used by pulls and finally:
    pull_result = None
    try:  # try/finally block to allow iter.close()
        if (self._use_enum_path_pull_operations is None
                or self._use_enum_path_pull_operations):
            try:  # operation try block
                pull_result = self.OpenEnumerateInstancePaths(
                    ClassName, namespace=namespace,
                    FilterQueryLanguage=FilterQueryLanguage,
                    FilterQuery=FilterQuery,
                    OperationTimeout=OperationTimeout,
                    ContinueOnError=ContinueOnError,
                    MaxObjectCount=MaxObjectCount, **extra)
                # Open operation succeeded; set has_pull flag
                self._use_enum_path_pull_operations = True
                for inst in pull_result.paths:
                    yield inst
                # Loop to pull more while eos is not returned.
                while not pull_result.eos:
                    pull_result = self.PullInstancePaths(
                        pull_result.context, MaxObjectCount=MaxObjectCount)
                    for inst in pull_result.paths:
                        yield inst
                pull_result = None  # clear the pull_result
                return
            # If NOT_SUPPORTED and first request, set the flag and try the
            # alternative request operation.
            # If use_pull_operations is True, always raise the exception.
            except CIMError as ce:
                if (self._use_enum_path_pull_operations is None
                        and ce.status_code == CIM_ERR_NOT_SUPPORTED):
                    self._use_enum_path_pull_operations = False
                else:
                    raise
        # Alternate request if Pull is not implemented. This does not allow
        # the FilterQuery or ContinueOnError.
        assert self._use_enum_path_pull_operations is False
        if FilterQuery is not None or FilterQueryLanguage is not None:
            raise ValueError('EnumerateInstanceNames does not support '
                             'FilterQuery.')
        if ContinueOnError is not None:
            raise ValueError('EnumerateInstanceNames does not support '
                             'ContinueOnError.')
        enum_rslt = self.EnumerateInstanceNames(ClassName, namespace=namespace, **extra)
        # pylint: disable=unused-variable
        host, port, ssl = parse_url(self.url)
        # get the namespace for the operation
        if namespace is None and isinstance(ClassName, CIMClassName):
            namespace = ClassName.namespace
        namespace = self._iparam_namespace_from_namespace(namespace)
        for path in enum_rslt:
            if path.namespace is None:
                path.namespace = namespace
            if path.host is None:
                path.host = host
        for inst in enum_rslt:
            yield inst
    # Cleanup if the caller closes the iterator before exhausting it
    finally:
        # Cleanup is only required if the pull context is open and not complete
        if pull_result is not None and not pull_result.eos:
            self.CloseEnumeration(pull_result.context)
            pull_result = None |
def update_estimator_from_task(estimator, task_id, task_type):
    """Update the training job of the estimator from a task in the DAG.

    Args:
        estimator (sagemaker.estimator.EstimatorBase): The estimator to update
        task_id (str): The task id of any airflow.contrib.operators.SageMakerTrainingOperator or
            airflow.contrib.operators.SageMakerTuningOperator that generates training jobs in the DAG.
        task_type (str): Whether the task is from SageMakerTrainingOperator or SageMakerTuningOperator.
            Values can be 'training', 'tuning' or None (which means the training job is not from any task).
    """ |
    if task_type is None:
        return
    if task_type.lower() == 'training':
        training_job = "{{ ti.xcom_pull(task_ids='%s')['Training']['TrainingJobName'] }}" % task_id
        job_name = training_job
    elif task_type.lower() == 'tuning':
        training_job = "{{ ti.xcom_pull(task_ids='%s')['Tuning']['BestTrainingJob']['TrainingJobName'] }}" % task_id
        # need to strip the double quotes in json to get the string
        job_name = ("{{ ti.xcom_pull(task_ids='%s')['Tuning']['TrainingJobDefinition']['StaticHyperParameters']"
                    "['sagemaker_job_name'].strip('%s') }}" % (task_id, '"'))
    else:
        raise ValueError("task_type must be either 'training', 'tuning' or None.")
    estimator._current_job_name = training_job
    if isinstance(estimator, sagemaker.estimator.Framework):
        update_submit_s3_uri(estimator, job_name) |
def notify(self, msgtype, method, params):
    """Handle an incoming notify request.""" |
    self.dispatch.call(method, params) |
def get_addon_module_name(addonxml_filename):
    '''Attempts to extract a module name from the given addon's addon.xml file.

    Looks for the 'xbmc.python.pluginsource' extension node and returns the
    addon's filename without the .py suffix.
    ''' |
    try:
        xml = ET.parse(addonxml_filename).getroot()
    except IOError:
        sys.exit('Cannot find an addon.xml file in the current working '
                 'directory. Please run this command from the root directory '
                 'of an addon.')
    try:
        plugin_source = (ext for ext in xml.findall('extension')
                         if ext.get('point') == 'xbmc.python.pluginsource').next()
    except StopIteration:
        sys.exit('ERROR, no pluginsource in addonxml')
    return plugin_source.get('library').split('.')[0] |
def _get_env_data(self, reload=False):
    """Get the data about the available environments.

    env_data is a structure {name -> (resourcedir, kernel spec)}
    """ |
    # This is called much too often, and finding processes is really expensive :-(
    if not reload and getattr(self, "_env_data_cache", {}):
        return getattr(self, "_env_data_cache")
    env_data = {}
    for supplyer in ENV_SUPPLYER:
        env_data.update(supplyer(self))
    env_data = {name: env_data[name] for name in env_data if self.validate_env(name)}
    new_kernels = [env for env in list(env_data.keys())
                   if env not in list(self._env_data_cache.keys())]
    if new_kernels:
        self.log.info("Found new kernels in environments: %s", ", ".join(new_kernels))
    self._env_data_cache = env_data
    return env_data |
def artist_to_ref(artist):
    """Convert a mopidy artist to a mopidy ref.""" |
    if artist.name:
        name = artist.name
    else:
        name = 'Unknown artist'
    return Ref.directory(uri=artist.uri, name=name) |
def convert(self):
    """Copies data from RAPID netCDF output to a CF-compliant netCDF file.""" |
    try:
        log('Processing %s ...' % self.rapid_output_file_list[0])
        time_start_conversion = datetime.utcnow()
        # Validate the raw netCDF file
        log('validating input netCDF file', 'INFO')
        id_len, time_len = self._validate_raw_nc()
        # Initialize the output file (create dimensions and variables)
        log('initializing output', 'INFO')
        self._initialize_output(time_len, id_len)
        self._generate_time_values()
        # copy river ids over
        self.cf_nc.variables[self.output_id_dim_name][:] = self.raw_nc_list[0].get_river_id_array()
        # Populate comid, lat, lon, z
        log('writing comid lat lon z')
        lookup_start = datetime.now()
        self._write_comid_lat_lon_z()
        duration = str((datetime.now() - lookup_start).total_seconds())
        log('Lookup Duration (s): ' + duration)
        # Create a variable for streamflow. This is big, and slows down
        # previous steps if we do it earlier.
        self._copy_streamflow_values()
        # close files
        for raw_nc in self.raw_nc_list:
            raw_nc.close()
        self.cf_nc.close()
        # delete the original RAPID output
        remove_files(*self.rapid_output_file_list)
        # rename the CF-compliant file to the original name
        os.rename(self.cf_compliant_file, self.rapid_output_file_list[0])
        log('Time to process %s' % (datetime.utcnow() - time_start_conversion))
    except Exception:
        # delete the CF-compliant RAPID output
        remove_files(self.cf_compliant_file)
        raise |
def convert_ages(Recs, data_model=3):
    """Converts ages to Ma.

    Parameters
    ----------
    Recs : list of dictionaries in the data model given by data_model
    data_model : MagIC data model (default is 3)
    """ |
    if data_model == 3:
        site_key = 'site'
        agekey = "age"
        keybase = ""
    else:
        site_key = 'er_site_names'
        agekey = find('age', list(Recs[0].keys()))
        if agekey != "":
            keybase = agekey.split('_')[0] + '_'
    New = []
    for rec in Recs:
        age = ''
        if rec[keybase + 'age'] != "":
            age = float(rec[keybase + "age"])
        elif rec[keybase + 'age_low'] != "" and rec[keybase + 'age_high'] != '':
            age = np.mean([rec[keybase + 'age_high'], rec[keybase + "age_low"]])
            # age = float(rec[keybase + 'age_low']) + old_div(
            #     (float(rec[keybase + 'age_high']) - float(rec[keybase + 'age_low'])), 2.)
        if age != '':
            if rec[keybase + 'age_unit'] == 'Ma':
                rec[keybase + 'age'] = '%10.4e' % (age)
            elif rec[keybase + 'age_unit'] == 'ka' or rec[keybase + 'age_unit'] == 'Ka':
                rec[keybase + 'age'] = '%10.4e' % (age * .001)
            elif rec[keybase + 'age_unit'] == 'Years AD (+/-)':
                rec[keybase + 'age'] = '%10.4e' % ((2011 - age) * 1e-6)
            elif rec[keybase + 'age_unit'] == 'Years BP':
                rec[keybase + 'age'] = '%10.4e' % ((age) * 1e-6)
            rec[keybase + 'age_unit'] = 'Ma'
            New.append(rec)
        else:
            if 'site_key' in list(rec.keys()):
                print('problem in convert_ages:', rec['site_key'])
            elif 'er_site_name' in list(rec.keys()):
                print('problem in convert_ages:', rec['er_site_name'])
            else:
                print('problem in convert_ages:', rec)
    if len(New) == 0:
        print('no age key:', rec)
    return New |
def dict_match(d, key, default=None):
    """Like __getitem__ but works as if the keys() are all filename patterns.

    Returns the value of any dict key that matches the passed key.

    Args:
        d (dict): A dict with filename patterns as keys
        key (str): A key potentially matching any of the keys
        default (object): The object to return if no pattern matched the
            passed in key
    Returns:
        object: The dict value where the dict key matched the passed in key,
            or default if there was no match.
    """ |
    if key in d and "[" not in key:
        return d[key]
    else:
        for pattern, value in iteritems(d):
            if fnmatchcase(key, pattern):
                return value
    return default |
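
A small usage sketch with made-up patterns: an exact key wins only when the key itself contains no "[" (which would make it look like a pattern); otherwise the patterns are tried in order.

```python
rules = {"*.flac": "lossless", "*.mp3": "lossy", "cover.jpg": "artwork"}
assert dict_match(rules, "song.mp3") == "lossy"          # pattern match
assert dict_match(rules, "cover.jpg") == "artwork"       # exact match
assert dict_match(rules, "notes.txt", default="unknown") == "unknown"
```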
def find_last_character_instance(sentence, character):
    """Function to locate the last instance of a character in a string.

    Example:
        find_last_character_instance('hello world', 'l') -> 9
        find_last_character_instance('language', 'g') -> 6
        find_last_character_instance('little', 'y') -> None

    Parameters:
        sentence: The string in which to search for the character.
        character: The character to be located.
    Returns:
        int: The index at which the character was last found, None if not found.
    """ |
    last_seen = -1
    for idx in range(len(sentence)):
        if sentence[idx] == character:
            last_seen = idx
    return None if last_seen == -1 else last_seen |
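
For reference, the standard library already covers this scan: `str.rfind` also locates the last occurrence, returning -1 instead of None when nothing matches.

```python
def find_last_character_instance_rfind(sentence, character):
    # rfind scans from the right; translate its -1 sentinel to None
    idx = sentence.rfind(character)
    return None if idx == -1 else idx
```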
def end_time(self):
    """Return the end time of the object.""" |
    try:  # MSG:
        try:
            return datetime.strptime(self.nc.attrs['time_coverage_end'], '%Y-%m-%dT%H:%M:%SZ')
        except TypeError:
            return datetime.strptime(self.nc.attrs['time_coverage_end'].astype(str),
                                     '%Y-%m-%dT%H:%M:%SZ')
    except ValueError:  # PPS:
        return datetime.strptime(self.nc.attrs['time_coverage_end'], '%Y%m%dT%H%M%S%fZ') |
def get_digests(self):
    """Returns a map of repositories to digests.""" |
    digests = {}  # repository -> digest
    for registry in self.workflow.push_conf.docker_registries:
        for image in self.workflow.tag_conf.images:
            image_str = image.to_str()
            if image_str in registry.digests:
                digest = registry.digests[image_str]
                digests[image.to_str(registry=False)] = digest
    return digests |
def d(obj, mode='exec', file=None):
    """Interactive convenience for displaying the disassembly of a function,
    module, or code string.

    Compiles `text` and recursively traverses the result looking for `code`
    objects to render with `dis.dis`.

    Parameters
    ----------
    obj : str, CodeType, or object with __code__ attribute
        Object to disassemble.
        If `obj` is an instance of CodeType, we use it unchanged.
        If `obj` is a string, we compile it with `mode` and then disassemble.
        Otherwise, we look for a `__code__` attribute on `obj`.
    mode : {'exec', 'eval'}, optional
        Mode for `compile`. Default is 'exec'.
    file : None or file-like object, optional
        File to use to print output. If the default of `None` is passed, we
        use sys.stdout.
    """ |
    if file is None:
        file = sys.stdout
    for name, co in walk_code(extract_code(obj, compile_mode=mode)):
        print(name, file=file)
        print('-' * len(name), file=file)
        dis.dis(co, file=file)
        print('', file=file) |
def zopen(filename, *args, **kwargs):
    """This function wraps around the bz2, gzip and standard python open
    functions to deal intelligently with bzipped, gzipped or standard text
    files.

    Args:
        filename (str/Path): filename or pathlib.Path.
        \*args: Standard args for python open(..). E.g., 'r' for read, 'w' for
            write.
        \*\*kwargs: Standard kwargs for python open(..).

    Returns:
        File-like object. Supports with context.
    """ |
    if Path is not None and isinstance(filename, Path):
        filename = str(filename)
    name, ext = os.path.splitext(filename)
    ext = ext.upper()
    if ext == ".BZ2":
        if PY_VERSION[0] >= 3:
            return bz2.open(filename, *args, **kwargs)
        else:
            # Python 2's BZ2File does not understand text mode; strip 't'.
            args = list(args)
            if len(args) > 0:
                args[0] = "".join([c for c in args[0] if c != "t"])
            if "mode" in kwargs:
                kwargs["mode"] = "".join([c for c in kwargs["mode"] if c != "t"])
            return bz2.BZ2File(filename, *args, **kwargs)
    elif ext in (".GZ", ".Z"):
        return gzip.open(filename, *args, **kwargs)
    else:
        return io.open(filename, *args, **kwargs) |
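
A minimal usage sketch (hypothetical file names): the same call works for plain, gzipped and bzipped files, dispatching purely on the extension.

```python
# compressed file: routed to gzip.open
with zopen("results.json.gz", "rt") as f:
    text = f.read()

# plain file: falls through to io.open
with zopen("notes.txt", "w") as f:
    f.write("plain files fall through to io.open\n")
```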
def looking_for_pub(self):
    '''Look for a pub that accepts me and my friends.''' |
    if self['pub'] is not None:
        return self.sober_in_pub
    self.debug('I am looking for a pub')
    group = list(self.get_neighboring_agents())
    for pub in self.env.available_pubs():
        self.debug("We're trying to get into {}: total: {}".format(pub, len(group)))
        if self.env.enter(pub, self, *group):
            self.info("We're all {} getting in {}!".format(len(group), pub))
            return self.sober_in_pub |
def rec_split_path(path):
    '''Recursively split a path, yielding the absolute path and directory
    name of each parent level.''' |
    if len(path) > 1 and path.endswith('/'):
        path = path[:-1]
    if '/' not in path:
        return [path, ]
    result = []
    while path != '/':
        parent, name = os.path.split(path)
        result.append((path, name))
        path = parent
    result.append(('/', '/'))
    result.reverse()
    return result |
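
A concrete trace (on a POSIX `os.path`): walking '/usr/local/bin' from the root down, each tuple pairs the absolute path of a level with its directory name.

```python
assert rec_split_path('/usr/local/bin') == [
    ('/', '/'),
    ('/usr', 'usr'),
    ('/usr/local', 'local'),
    ('/usr/local/bin', 'bin'),
]
```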
def fuller(target, MA, MB, vA, vB, temperature='pore.temperature', pressure='pore.pressure'):
    r"""Uses the Fuller model to estimate the diffusion coefficient for gases
    from first principles at the conditions of interest.

    Parameters
    ----------
    target : OpenPNM Object
        The object for which these values are being calculated. This
        controls the length of the calculated array, and also provides
        access to other necessary thermofluid properties.
    MA : float, array_like
        Molecular weight of component A [kg/mol]
    MB : float, array_like
        Molecular weight of component B [kg/mol]
    vA : float, array_like
        Sum of atomic diffusion volumes for component A
    vB : float, array_like
        Sum of atomic diffusion volumes for component B
    pressure : string
        The dictionary key containing the pressure values in Pascals (Pa)
    temperature : string
        The dictionary key containing the temperature values in Kelvin (K)
    """ |
    T = target[temperature]
    P = target[pressure]
    MAB = 2 * (1.0 / MA + 1.0 / MB) ** (-1)
    MAB = MAB * 1e3  # kg/mol -> g/mol
    P = P * 1e-5     # Pa -> bar
    value = 0.00143 * T ** 1.75 / (P * (MAB ** 0.5) *
                                   (vA ** (1. / 3) + vB ** (1. / 3)) ** 2) * 1e-4
    return value |
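
A worked sketch for O2 diffusing in N2 at 298 K and 1 atm, assuming the standard Fuller diffusion volumes (O2 ~ 16.3, N2 ~ 18.5); a plain dict stands in for the OpenPNM target object, since the function only indexes it by key.

```python
target = {'pore.temperature': 298.0, 'pore.pressure': 101325.0}
D = fuller(target, MA=0.032, MB=0.028, vA=16.3, vB=18.5)
print(D)  # ~2.1e-5 m2/s, close to the textbook ~0.21 cm2/s for O2-N2
```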
def inputtemplates(self):
    """Return all input templates as a list (of InputTemplate instances).""" |
    l = []
    for profile in self.profiles:
        l += profile.input
    return l |
def update(gandi, domain, zone_id, file, record, new_record):
    """Update record entries for a domain.

    You can update an individual record using the --record and --new-record
    parameters, or you can use a plaintext file to update all records of a
    DNS zone at once with the --file parameter.
    """ |
    if not zone_id:
        result = gandi.domain.info(domain)
        zone_id = result['zone_id']
    if not zone_id:
        gandi.echo("No zone records found, domain %s doesn't seem to be "
                   'managed at Gandi.' % domain)
        return
    if file:
        records = file.read()
        result = gandi.record.zone_update(zone_id, records)
        return result
    elif record and new_record:
        result = gandi.record.update(zone_id, record, new_record)
        return result
    else:
        gandi.echo('You must indicate a zone file or a record. '
                   'Use `gandi record update --help` for more information.') |
def PackagePublish(package, classification, visibility, os):
    """Publishes a Blueprint Package for use within the Blueprint Designer.

    https://t3n.zendesk.com/entries/20426453-Publish-Package

    :param package: path to zip file containing package.manifest and supporting scripts
    :param classification: package type (System, Script, Software)
    :param visibility: package visibility filter (Public, Private, Shared)
    :param os: list of ints containing Operating System template IDs
    """ |
    r = clc.v1.API.Call('post', 'Blueprint/PublishPackage',
                        {'Classification': Blueprint.classification_stoi[classification],
                         'Name': package,
                         'OperatingSystems': os,
                         'Visibility': Blueprint.visibility_stoi[visibility]})
    if int(r['StatusCode']) == 0:
        return r |
def auto_zoom(zoomx=True, zoomy=True, axes="gca", x_space=0.04, y_space=0.04, draw=True):
    """Looks at the bounds of the plotted data and zooms accordingly, leaving some
    space around the data.""" |
    # Disable auto-updating by default.
    _pylab.ioff()
    if axes == "gca":
        axes = _pylab.gca()
    # get the current bounds
    x10, x20 = axes.get_xlim()
    y10, y20 = axes.get_ylim()
    # Autoscale using pylab's technique (catches the error bars!)
    axes.autoscale(enable=True, tight=True)
    # Add padding
    if axes.get_xscale() == 'linear':
        x1, x2 = axes.get_xlim()
        xc = 0.5 * (x1 + x2)
        xs = 0.5 * (1 + x_space) * (x2 - x1)
        axes.set_xlim(xc - xs, xc + xs)
    if axes.get_yscale() == 'linear':
        y1, y2 = axes.get_ylim()
        yc = 0.5 * (y1 + y2)
        ys = 0.5 * (1 + y_space) * (y2 - y1)
        axes.set_ylim(yc - ys, yc + ys)
    # If we weren't supposed to zoom x or y, reset them
    if not zoomx:
        axes.set_xlim(x10, x20)
    if not zoomy:
        axes.set_ylim(y10, y20)
    if draw:
        _pylab.ion()
        _pylab.draw() |
def pasa(args):
    """%prog pasa pasa_db fastafile

    Run EVM in TIGR-only mode.
    """ |
    p = OptionParser(pasa.__doc__)
    opts, args = p.parse_args(args)
    if len(args) != 2:
        sys.exit(not p.print_help())
    pasa_db, fastafile = args
    termexons = "pasa.terminal_exons.gff3"
    if need_update(fastafile, termexons):
        cmd = "$ANNOT_DEVEL/PASA2/scripts/pasa_asmbls_to_training_set.dbi"
        cmd += ' -M "{0}:mysql.tigr.org" -p "access:access"'.format(pasa_db)
        cmd += ' -g {0}'.format(fastafile)
        sh(cmd)
        cmd = "$EVM/PasaUtils/retrieve_terminal_CDS_exons.pl"
        cmd += " trainingSetCandidates.fasta trainingSetCandidates.gff"
        sh(cmd, outfile=termexons)
    return termexons |
def block(self):
    """While this context manager is active, any signals for aborting
    the process will be queued, and the program will exit once the context
    is left.""" |
    self._nosig = True
    yield
    self._nosig = False
    if self._interrupted:
        raise SystemExit("Aborted...") |
def _evaluate_one(caller, svtype, size_range, ensemble, truth, data):
    """Compare ensemble results for a caller against a specific caller and SV type.""" |
    def cnv_matches(name):
        return cnv_to_event(name, data) == svtype

    def is_breakend(name):
        return name.startswith("BND")

    def in_size_range(max_buffer=0):
        def _work(feat):
            minf, maxf = size_range
            buffer = min(max_buffer, int(((maxf + minf) / 2.0) / 10.0))
            size = feat.end - feat.start
            return size >= max([0, minf - buffer]) and size < maxf + buffer
        return _work

    def is_caller_svtype(feat):
        for name in feat.name.split(","):
            if ((name.startswith(svtype) or cnv_matches(name) or is_breakend(name))
                    and (caller == "sv-ensemble" or name.endswith(caller))):
                return True
        return False

    minf, maxf = size_range
    efeats = pybedtools.BedTool(ensemble).filter(in_size_range(0)).filter(is_caller_svtype).saveas().sort().merge()
    tfeats = pybedtools.BedTool(truth).filter(in_size_range(0)).sort().merge().saveas()
    etotal = efeats.count()
    ttotal = tfeats.count()
    match = efeats.intersect(tfeats, u=True).sort().merge().saveas().count()
    return {"sensitivity": _stat_str(match, ttotal),
            "precision": _stat_str(match, etotal)} |
def _get_tiles(board=None, terrain=None, numbers=None):
    """Generate a list of tiles using the given terrain and numbers options.

    terrain options supported:
    - Opt.empty -> all tiles are desert
    - Opt.random -> tiles are randomized
    - Opt.preset ->
    - Opt.debug -> alias for Opt.random

    numbers options supported:
    - Opt.empty -> no tiles have numbers
    - Opt.random -> numbers are randomized
    - Opt.preset ->
    - Opt.debug -> alias for Opt.random

    :param terrain_opts: Opt
    :param numbers_opts: Opt
    :return: list(Tile)
    """ |
    if board is not None:
        # we have a board given; ignore the terrain and numbers opts and log
        # warnings if they were supplied
        tiles = _read_tiles_from_string(board)
    else:
        # we are being asked to generate a board
        tiles = _generate_tiles(terrain, numbers)
    return tiles |
def rn_theory(af, b):
    """R(n) ratio expected from theory for the given noise type.

    alpha = b + 2
    """ |
    # From IEEE 1139-2008:
    #   alpha   beta   ADEV_mu  MDEV_mu  Rn_mu
    #    -2      -4      1        1        0     Random Walk FM
    #    -1      -3      0        0        0     Flicker FM
    #     0      -2     -1       -1        0     White FM
    #     1      -1     -2       -2        0     Flicker PM
    #     2       0     -2       -3       -1     White PM
    # (a = -3 flicker walk FM)
    # (a = -4 random run FM)
    if b == 0:
        return pow(af, -1)
    elif b == -1:
        # f_h = 0.5/tau0 (assumed!)
        # af = tau/tau0
        # so f_h*tau = 0.5/tau0 * af * tau0 = 0.5*af
        avar = (1.038 + 3 * np.log(2 * np.pi * 0.5 * af)) / (4.0 * pow(np.pi, 2))
        mvar = 3 * np.log(256.0 / 27.0) / (8.0 * pow(np.pi, 2))
        return mvar / avar
    else:
        return pow(af, 0) |
def msg2long_form(msg, processor, **config):
    """Return a 'long form' text representation of a message.

    For most messages this will just default to the terse subtitle, but for
    some messages a long, paragraph-structured block of text may be returned.
    """ |
    result = processor.long_form(msg, **config)
    if not result:
        result = processor.subtitle(msg, **config)
    return result |
def restore(self):
    """Restores this DriveItem version.

    You cannot restore the current version (the last one).

    :return: Success / Failure
    :rtype: bool
    """ |
    url = self.build_url(self._endpoints.get('restore').format(id=self.object_id))
    response = self.con.post(url)
    return bool(response) |
def towgs84(E, N, pkm=False, presentation=None):
    """Convert coordinates from TWD97 to WGS84.

    The east and north coordinates should be in meters and in float;
    pkm should be true for the Penghu, Kinmen and Matsu area.

    You can specify one of the following presentations of the returned values:
        dms - A tuple with degrees (int), minutes (int) and seconds (float)
        dmsstr - [+/-]DDD°MMM'DDD.DDDDD" (unicode)
        mindec - A tuple with degrees (int) and minutes (float)
        mindecstr - [+/-]DDD°MMM.MMMMM' (unicode)
        (default) degdec - DDD.DDDDD (float)
    """ |
    _lng0 = lng0pkm if pkm else lng0
    E /= 1000.0
    N /= 1000.0
    epsilon = (N - N0) / (k0 * A)
    eta = (E - E0) / (k0 * A)
    epsilonp = (epsilon
                - beta1 * sin(2 * 1 * epsilon) * cosh(2 * 1 * eta)
                - beta2 * sin(2 * 2 * epsilon) * cosh(2 * 2 * eta)
                - beta3 * sin(2 * 3 * epsilon) * cosh(2 * 3 * eta))
    etap = (eta
            - beta1 * cos(2 * 1 * epsilon) * sinh(2 * 1 * eta)
            - beta2 * cos(2 * 2 * epsilon) * sinh(2 * 2 * eta)
            - beta3 * cos(2 * 3 * epsilon) * sinh(2 * 3 * eta))
    sigmap = (1
              - 2 * 1 * beta1 * cos(2 * 1 * epsilon) * cosh(2 * 1 * eta)
              - 2 * 2 * beta2 * cos(2 * 2 * epsilon) * cosh(2 * 2 * eta)
              - 2 * 3 * beta3 * cos(2 * 3 * epsilon) * cosh(2 * 3 * eta))
    taup = (2 * 1 * beta1 * sin(2 * 1 * epsilon) * sinh(2 * 1 * eta)
            + 2 * 2 * beta2 * sin(2 * 2 * epsilon) * sinh(2 * 2 * eta)
            + 2 * 3 * beta3 * sin(2 * 3 * epsilon) * sinh(2 * 3 * eta))
    chi = asin(sin(epsilonp) / cosh(etap))
    latitude = chi + delta1 * sin(2 * 1 * chi) + delta2 * sin(2 * 2 * chi) + delta3 * sin(2 * 3 * chi)
    longitude = _lng0 + atan(sinh(etap) / cos(epsilonp))
    func = None
    presentation = 'to%s' % presentation if presentation else None
    if presentation in presentations:
        func = getattr(sys.modules[__name__], presentation)
    if func and func != 'todegdec':
        return func(degrees(latitude)), func(degrees(longitude))
    return (degrees(latitude), degrees(longitude)) |
def transform_incoming(self, son, collection):
    """Recursively replace all keys that need transforming.""" |
    return self._transform_incoming(copy.deepcopy(son), collection) |
def check_dihedral(self, construction_table):
    """Checks if the dihedral-defining atom is colinear.

    Checks for each index, starting from the third row of the
    ``construction_table``, if the reference atoms are colinear.

    Args:
        construction_table (pd.DataFrame):

    Returns:
        list: A list of problematic indices.
    """ |
    c_table = construction_table
    angles = self.get_angle_degrees(c_table.iloc[3:, :].values)
    problem_index = np.nonzero((175 < angles) | (angles < 5))[0]
    rename = dict(enumerate(c_table.index[3:]))
    problem_index = [rename[i] for i in problem_index]
    return problem_index |
def send_media_file(self, filename):
    """Function used to send media files from the media folder to the browser.""" |
    cache_timeout = self.get_send_file_max_age(filename)
    return send_from_directory(self.config['MEDIA_FOLDER'], filename,
                               cache_timeout=cache_timeout) |
def has_enacted(self, billing_cycle):
    """Has this recurring cost already enacted transactions for the given billing cycle?""" |
    return RecurredCost.objects.filter(
        recurring_cost=self,
        billing_cycle=billing_cycle,
    ).exists() |
def set_log_level(self, log_level):
    '''Configures the class log level.

    Arguments:
        log_level (:obj:`str`): log level ('NOTSET', 'DEBUG', 'INFO',
            'WARNING', 'ERROR', 'CRITICAL')
    ''' |
    if log_level == 'DEBUG':
        self.log.setLevel(logging.DEBUG)
        self.log.debug("Changing log level to " + log_level)
    elif log_level == 'INFO':
        self.log.setLevel(logging.INFO)
        self.log.info("Changing log level to " + log_level)
    elif log_level == 'WARNING':
        self.log.setLevel(logging.WARNING)
        self.log.warning("Changing log level to " + log_level)
    elif log_level == 'ERROR':
        self.log.setLevel(logging.ERROR)
        self.log.error("Changing log level to " + log_level)
    elif log_level == 'CRITICAL':
        self.log.setLevel(logging.CRITICAL)
        self.log.critical("Changing log level to " + log_level)
    elif log_level == 'NOTSET':
        self.log.setLevel(logging.NOTSET)
    else:
        raise NotImplementedError('Not implemented log level ' + str(log_level)) |
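
The elif chain can collapse to one lookup, since `logging.getLevelName` maps registered level names back to numeric values. A sketch of that alternative (slight behavioural difference: 'NOTSET' also emits a confirmation message here, at DEBUG):

```python
def set_log_level(self, log_level):
    level = logging.getLevelName(log_level)  # 'DEBUG' -> 10, etc.
    if not isinstance(level, int):
        # unknown names come back as a "Level <name>" string
        raise NotImplementedError('Not implemented log level ' + str(log_level))
    self.log.setLevel(level)
    self.log.log(level or logging.DEBUG, "Changing log level to " + log_level)
```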
def file_list_hosts(blockchain_id, wallet_keys=None, config_path=CONFIG_PATH):
    """Given a blockchain ID, find out the hosts the blockchain ID owner has registered keys for.

    Return {'status': True, 'hosts': hostnames} on success
    Return {'error': ...} on failure
    """ |
    config_dir = os.path.dirname(config_path)
    try:
        ret = blockstack_gpg.gpg_list_app_keys(blockchain_id, APP_NAME,
                                               wallet_keys=wallet_keys, config_dir=config_dir)
    except Exception, e:
        ret = {'error': traceback.format_exc(e)}
    if 'error' in ret:
        log.error("Failed to list app keys: %s" % ret['error'])
        return {'error': 'Failed to list app keys'}
    hosts = []
    for key_info in ret:
        hostname = key_info['keyName']
        hosts.append(hostname)
    return {'status': True, 'hosts': hosts} |
def cleanup_outdir(outdir, archive):
    """Cleanup outdir after extraction and return the target file name and
    a result string.""" |
    make_user_readable(outdir)
    # move a single directory or file in outdir
    (success, msg) = move_outdir_orphan(outdir)
    if success:
        # msg is a single directory or filename
        return msg, "`%s'" % msg
    # outdir remains unchanged;
    # rename it to something more user-friendly (basically the archive
    # name without extension)
    outdir2 = util.get_single_outfile("", archive)
    os.rename(outdir, outdir2)
    return outdir2, "`%s' (%s)" % (outdir2, msg) |
def filenames(self):
    """Returns the filenames that this par2 file repairs.""" |
    return [p.name for p in self.packets if isinstance(p, FileDescriptionPacket)] |
def set_tts(self, level):
    """Set the values for
    :data:`~aeneas.runtimeconfiguration.RuntimeConfiguration.TTS`
    and
    :data:`~aeneas.runtimeconfiguration.RuntimeConfiguration.TTS_PATH`
    matching the given granularity level.

    Currently supported levels:

    * ``1`` (paragraph)
    * ``2`` (sentence)
    * ``3`` (word)

    :param int level: the desired granularity level
    """ |
    if level in self.TTS_GRANULARITY_MAP.keys():
        tts_key, tts_path_key = self.TTS_GRANULARITY_MAP[level]
        self[self.TTS] = self[tts_key]
        self[self.TTS_PATH] = self[tts_path_key] |
def browse(package, homepage):
    """Browse to a package's PyPI or project homepage.""" |
    p = Package(package)
    try:
        if homepage:
            secho(u'Opening homepage for "{0}"...'.format(package), bold=True)
            url = p.home_page
        else:
            secho(u'Opening PyPI page for "{0}"...'.format(package), bold=True)
            url = p.package_url
    except NotFoundError:
        abort_not_found(package)
    click.launch(url) |
def _fetch_url(url, is_threaded, timeout=None):
    """Crawls the html content of the parameter url and saves the html in _results.

    :param url:
    :param is_threaded: If True, results will be stored for later processing by the fetch_urls method. Else not.
    :param timeout: in seconds; if None, the urllib default is used
    :return: html of the url
    """ |
    headers = {'User-Agent': 'Mozilla/5.0'}
    req = urllib.request.Request(url, None, headers)
    html = urllib.request.urlopen(req, data=None, timeout=timeout).read()
    if is_threaded:
        SimpleCrawler._results[url] = html
    return html |
def sample_annotation(data):
    """Annotate miRNAs using the miRBase database with the seqbuster tool.""" |
    names = data["rgnames"]['sample']
    tools = dd.get_expression_caller(data)
    work_dir = os.path.join(dd.get_work_dir(data), "mirbase")
    out_dir = os.path.join(work_dir, names)
    utils.safe_makedir(out_dir)
    out_file = op.join(out_dir, names)
    if dd.get_mirbase_hairpin(data):
        mirbase = op.abspath(op.dirname(dd.get_mirbase_hairpin(data)))
        if utils.file_exists(data["collapse"]):
            data['transcriptome_bam'] = _align(data["collapse"], dd.get_mirbase_hairpin(data),
                                               out_file, data)
            data['seqbuster'] = _miraligner(data["collapse"], out_file, dd.get_species(data),
                                            mirbase, data['config'])
            data["mirtop"] = _mirtop(data['seqbuster'], dd.get_species(data), mirbase,
                                     out_dir, data['config'])
        else:
            logger.debug("Trimmed collapsed file is empty for %s." % names)
    else:
        logger.debug("No annotation file from miRBase.")
    sps = dd.get_species(data) if dd.get_species(data) else "None"
    logger.debug("Looking for mirdeep2 database for %s" % names)
    if file_exists(op.join(dd.get_work_dir(data), "mirdeep2", "novel", "hairpin.fa")):
        data['seqbuster_novel'] = _miraligner(data["collapse"], "%s_novel" % out_file, sps,
                                              op.join(dd.get_work_dir(data), "mirdeep2", "novel"),
                                              data['config'])
    if "trna" in tools:
        data['trna'] = _mint_trna_annotation(data)
    data = spikein.counts_spikein(data)
    return [[data]] |
def rpc_get_zonefiles(self, zonefile_hashes, **con_info):
    """Get zonefiles from the local zonefile set.

    Only return at most 100 zonefiles.
    Return {'status': True, 'zonefiles': {zonefile_hash: zonefile}} on success
    Return {'error': ...} on error

    zonefiles will be serialized to string and base64-encoded
    """ |
    conf = get_blockstack_opts()
    if not is_atlas_enabled(conf):
        return {'error': 'No data', 'http_status': 400}
    if 'zonefiles' not in conf:
        return {'error': 'No zonefiles directory (likely a configuration bug)', 'http_status': 404}
    if type(zonefile_hashes) != list:
        log.error("Not a zonefile hash list")
        return {'error': 'Invalid zonefile hashes', 'http_status': 400}
    if len(zonefile_hashes) > 100:
        log.error("Too many requests (%s)" % len(zonefile_hashes))
        return {'error': 'Too many requests (no more than 100 allowed)', 'http_status': 400}
    for zfh in zonefile_hashes:
        if not check_string(zfh, min_length=LENGTHS['value_hash'] * 2,
                            max_length=LENGTHS['value_hash'] * 2, pattern=OP_HEX_PATTERN):
            return {'error': 'Invalid zone file hash', 'http_status': 400}
    ret = {}
    for zonefile_hash in zonefile_hashes:
        zonefile_data = self.get_zonefile_data(zonefile_hash, conf['zonefiles'])
        if zonefile_data is None:
            continue
        ret[zonefile_hash] = base64.b64encode(zonefile_data)
    log.debug("Serve back %s zonefiles" % len(ret.keys()))
    return self.success_response({'zonefiles': ret}) |
def detect_microbit(self):
    """Detect a microbit.""" |
    try:
        gpad = MicroBitPad(self)
    except ModuleNotFoundError:
        warn("The microbit library could not be found in the pythonpath. \n"
             "For more information, please visit \n"
             "https://inputs.readthedocs.io/en/latest/user/microbit.html",
             RuntimeWarning)
    else:
        self.microbits.append(gpad)
        self.gamepads.append(gpad) |
def upvote(self):
    """Upvote :class:`Issue`.""" |
    self.requester.post('/{endpoint}/{id}/upvote', endpoint=self.endpoint, id=self.id)
    return self |
def wait_until_page_does_not_contain_these_elements(self, timeout, *locators):
    """Waits until all of the specified elements are not found on the page.

    | *Argument* | *Description* | *Example* |
    | timeout | maximum time to wait; if set to ${None} it will use Selenium's default timeout | 5s |
    | *locators | Selenium 2 element locator(s) | id=MyId |
    """ |
    self._wait_until_no_error(timeout, self._wait_for_elements_to_go_away, locators) |
def getDiscountedBulkPrice(self):
    """Compute the discounted bulk price, excl. VAT.""" |
    price = self.getBulkPrice()
    price = price or 0
    discount = self.bika_setup.getMemberDiscount()
    discount = discount or 0
    return float(price) - (float(price) * float(discount)) / 100 |
def get_parser(parser):
    """Grabs the parser.

    args:
        parser: The parser
    """ |
    parser.description = textwrap.dedent("""
        Segment the .po files in LOCALE(s) based on the segmenting rules in
        config.yaml.

        Note that segmenting is *not* idempotent: it modifies the input file, so
        be careful that you don't run it twice on the same file.
    """.strip())
    parser.add_argument("locale", nargs="+", help="a locale to segment") |
def shear_mod(self):
    """Strain-compatible shear modulus [kN/m2].""" |
    try:
        value = self._shear_mod.value
    except AttributeError:
        value = self._shear_mod
    return value |
def parse_gene_list(path: str, graph: Graph, anno_type: str = "name") -> list:
    """Parse a list of genes and return them if they are in the network.

    :param str path: The path of the input file.
    :param Graph graph: The graph with genes as nodes.
    :param str anno_type: The type of annotation, with two options: name - Entrez ID, symbol - HGNC symbol.
    :return list: A list of genes, all of which are in the network.
    """ |
    # read the file
    genes = pd.read_csv(path, header=None)[0].tolist()
    genes = [str(int(gene)) for gene in genes]
    # get those genes which are in the network
    ind = []
    if anno_type == "name":
        ind = graph.vs.select(name_in=genes).indices
    elif anno_type == "symbol":
        ind = graph.vs.select(symbol_in=genes).indices
    else:
        raise Exception("The type can either be name or symbol, {} is not "
                        "supported".format(anno_type))
    genes = graph.vs[ind][anno_type]
    return genes |
def get_dependencies(ctx, archive_name, version):
    '''List the dependencies of an archive.''' |
    _generate_api(ctx)
    var = ctx.obj.api.get_archive(archive_name)
    deps = []
    dependencies = var.get_dependencies(version=version)
    for arch, dep in dependencies.items():
        if dep is None:
            deps.append(arch)
        else:
            deps.append('{}=={}'.format(arch, dep))
    click.echo('\n'.join(deps)) |
def _verify_nonce ( self , nonce , context ) :
"""Verify the received OIDC ' nonce ' from the ID Token .
: param nonce : OIDC nonce
: type nonce : str
: param context : current request context
: type context : satosa . context . Context
: raise SATOSAAuthenticationError : if the nonce is incorrect""" | backend_state = context . state [ self . name ]
if nonce != backend_state [ NONCE_KEY ] :
satosa_logging ( logger , logging . DEBUG , "Missing or invalid nonce in authn response for state: %s" % backend_state , context . state )
raise SATOSAAuthenticationError ( context . state , "Missing or invalid nonce in authn response" ) |
def ConnectNoSSL ( host = 'localhost' , port = 443 , user = 'root' , pwd = '' , service = "hostd" , adapter = "SOAP" , namespace = None , path = "/sdk" , version = None , keyFile = None , certFile = None , thumbprint = None , b64token = None , mechanism = 'userpass' ) :
"""Provides a standard method for connecting to a specified server without SSL
verification . Useful when connecting to servers with self - signed certificates
or when you wish to ignore SSL altogether . Will attempt to create an unverified
SSL context and then connect via the Connect method .""" | if hasattr ( ssl , '_create_unverified_context' ) :
sslContext = ssl . _create_unverified_context ( )
else :
sslContext = None
return Connect ( host = host , port = port , user = user , pwd = pwd , service = service , adapter = adapter , namespace = namespace , path = path , version = version , keyFile = keyFile , certFile = certFile , thumbprint = thumbprint , sslContext = sslContext , b64token = b64token , mechanism = mechanism ) |
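A hedged usage sketch (host and credentials are placeholders; Disconnect comes from the same pyVim.connect module as Connect):

from pyVim.connect import Disconnect

si = ConnectNoSSL(host="vcenter.example.com", user="administrator", pwd="secret")
print(si.CurrentTime())  # quick sanity check against the service instance
Disconnect(si)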
def calculate_mean_edit_distance_and_loss ( iterator , dropout , reuse ) :
r'''Calculates the CTC loss for a mini-batch.
This version computes the total and average CTC loss only; despite the
name, no decoding or edit-distance calculation is performed here.''' | # Obtain the next batch of data
( batch_x , batch_seq_len ) , batch_y = iterator . get_next ( )
# Calculate the logits of the batch
logits , _ = create_model ( batch_x , batch_seq_len , dropout , reuse = reuse )
# Compute the CTC loss using TensorFlow ' s ` ctc _ loss `
total_loss = tf . nn . ctc_loss ( labels = batch_y , inputs = logits , sequence_length = batch_seq_len )
# Calculate the average loss across the batch
avg_loss = tf . reduce_mean ( total_loss )
# Finally we return the average loss
return avg_loss |
def _repeat_length ( cls , part ) :
"""The length of the repeated portions of ` ` part ` ` .
: param part : a number
: type part : list of int
: returns : the first index at which part repeats
: rtype : int
If part does not repeat , result is the length of part .
Complexity : O ( len ( part ) ^ 2)""" | repeat_len = len ( part )
if repeat_len == 0 :
return repeat_len
first_digit = part [ 0 ]
limit = repeat_len // 2 + 1
indices = ( i for i in range ( 1 , limit ) if part [ i ] == first_digit )
for index in indices :
( quot , rem ) = divmod ( repeat_len , index )
if rem == 0 :
first_chunk = part [ 0 : index ]
if all ( first_chunk == part [ x : x + index ] for x in range ( index , quot * index , index ) ) :
return index
return repeat_len |
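Worked examples of the behaviour, treating the method as a plain function (the cls argument is unused by the logic):

assert _repeat_length(None, [1, 2, 1, 2, 1, 2]) == 2  # "12" repeats three times
assert _repeat_length(None, [1, 2, 3]) == 3           # no repetition: full length
assert _repeat_length(None, [7, 7, 7, 7]) == 1        # a single digit repeats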
def undelay ( self ) :
'''resolves all delayed arguments''' | i = 0
while i < len ( self ) :
op = self [ i ]
i += 1
if hasattr ( op , 'arg1' ) :
if isinstance ( op . arg1 , DelayedArg ) :
op . arg1 = op . arg1 . resolve ( )
if isinstance ( op . arg1 , CodeBlock ) :
op . arg1 . undelay ( ) |
def getEntity ( self , id = None , uri = None , match = None ) :
"""get a generic entity with given ID or via other methods . . .""" | if not id and not uri and not match :
return None
if isinstance ( id , str ) :
uri = id
id = None
if uri and not uri . startswith ( "http://" ) : # guard : uri may be None when only ` match ` was given
match = uri
uri = None
if match :
if not isinstance ( match , str ) :
return [ ]
res = [ ]
if ":" in match : # qname
for x in self . classes :
if match . lower ( ) in x . qname . lower ( ) :
res += [ x ]
for x in self . properties :
if match . lower ( ) in x . qname . lower ( ) :
res += [ x ]
else :
for x in self . classes :
if match . lower ( ) in x . uri . lower ( ) :
res += [ x ]
for x in self . properties :
if match . lower ( ) in x . uri . lower ( ) :
res += [ x ]
return res
else :
for x in self . classes :
if id and x . id == id :
return x
if uri and x . uri . lower ( ) == uri . lower ( ) :
return x
for x in self . properties :
if id and x . id == id :
return x
if uri and x . uri . lower ( ) == uri . lower ( ) :
return x
return None |
def handle ( self , connection_id , message_content ) :
"""If the connection wants to take on a role that requires a challenge to
be signed , it will request the challenge by sending an
AuthorizationChallengeRequest to the validator it wishes to connect to .
The validator will send back a random payload that must be signed .
If the connection has not sent a ConnectionRequest or the connection
has already received an AuthorizationChallengeResponse , an
AuthorizationViolation will be returned and the connection will be
closed .""" | if self . _network . get_connection_status ( connection_id ) != ConnectionStatus . CONNECTION_REQUEST :
LOGGER . debug ( "Connection's previous message was not a" " ConnectionRequest, Remove connection to %s" , connection_id )
violation = AuthorizationViolation ( violation = RoleType . Value ( "NETWORK" ) )
return HandlerResult ( HandlerStatus . RETURN_AND_CLOSE , message_out = violation , message_type = validator_pb2 . Message . AUTHORIZATION_VIOLATION )
random_payload = os . urandom ( PAYLOAD_LENGTH )
self . _challenge_payload_cache [ connection_id ] = random_payload
auth_challenge_response = AuthorizationChallengeResponse ( payload = random_payload )
self . _network . update_connection_status ( connection_id , ConnectionStatus . AUTH_CHALLENGE_REQUEST )
return HandlerResult ( HandlerStatus . RETURN , message_out = auth_challenge_response , message_type = validator_pb2 . Message . AUTHORIZATION_CHALLENGE_RESPONSE ) |
def quote_completions ( self , completions , cword_prequote , last_wordbreak_pos ) :
"""If the word under the cursor started with a quote ( as indicated by a nonempty ` ` cword _ prequote ` ` ) , escapes
occurrences of that quote character in the completions , and adds the quote to the beginning of each completion .
Otherwise , escapes all characters that bash splits words on ( ` ` COMP _ WORDBREAKS ` ` ) , and removes portions of
completions before the first colon if ( ` ` COMP _ WORDBREAKS ` ` ) contains a colon .
If there is only one completion , and it doesn ' t end with a * * continuation character * * ( ` ` / ` ` , ` ` : ` ` , or ` ` = ` ` ) ,
adds a space after the completion .
This method is exposed for overriding in subclasses ; there is no need to use it directly .""" | special_chars = "\\"
# If the word under the cursor was quoted , escape the quote char .
# Otherwise , escape all special characters and specially handle all COMP _ WORDBREAKS chars .
if cword_prequote == "" : # Bash mangles completions which contain characters in COMP _ WORDBREAKS .
# This workaround has the same effect as _ _ ltrim _ colon _ completions in bash _ completion
# ( extended to characters other than the colon ) .
if last_wordbreak_pos :
completions = [ c [ last_wordbreak_pos + 1 : ] for c in completions ]
special_chars += "();<>|&!`$* \t\n\"'"
elif cword_prequote == '"' :
special_chars += '"`$!'
if os . environ . get ( "_ARGCOMPLETE_SHELL" ) == "tcsh" : # tcsh escapes special characters itself .
special_chars = ""
elif cword_prequote == "'" : # Nothing can be escaped in single quotes , so we need to close
# the string , escape the single quote , then open a new string .
special_chars = ""
completions = [ c . replace ( "'" , r"'\''" ) for c in completions ]
for char in special_chars :
completions = [ c . replace ( char , "\\" + char ) for c in completions ]
if self . append_space : # Similar functionality in bash was previously turned off by supplying the " - o nospace " option to complete .
# Now it is conditionally disabled using " compopt - o nospace " if the match ends in a continuation character .
# This code is retained for environments where this isn ' t done natively .
continuation_chars = "=/:"
if len ( completions ) == 1 and completions [ 0 ] [ - 1 ] not in continuation_chars :
if cword_prequote == "" :
completions [ 0 ] += " "
return completions |
def hilbertrot ( n , x , y , rx , ry ) :
"""Rotates and flips a quadrant appropriately for the Hilbert scan
generator. See https://en.wikipedia.org/wiki/Hilbert_curve.""" | if ry == 0 :
if rx == 1 :
x = n - 1 - x
y = n - 1 - y
return y , x
return x , y |
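This matches the rot() helper from the Wikipedia reference, so the usual distance-to-coordinate conversion can be built on top of it; a sketch:

def hilbert_d2xy(n, d):
    """Map distance d along the Hilbert curve of an n x n grid (n a power
    of two) to (x, y) coordinates, per the Wikipedia pseudocode."""
    x = y = 0
    t = d
    s = 1
    while s < n:
        rx = 1 & (t // 2)
        ry = 1 & (t ^ rx)
        x, y = hilbertrot(s, x, y, rx, ry)
        x += s * rx
        y += s * ry
        t //= 4
        s *= 2
    return x, y

# For n=2 the curve visits (0,0), (0,1), (1,1), (1,0):
assert [hilbert_d2xy(2, d) for d in range(4)] == [(0, 0), (0, 1), (1, 1), (1, 0)]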
def getPrimaryRole ( store , primaryRoleName , createIfNotFound = False ) :
"""Get Role object corresponding to an identifier name . If the role name
passed is the empty string , it is assumed that the user is not
authenticated , and the ' Everybody ' role is primary . If the role name
passed is non - empty , but has no corresponding role , the ' Authenticated '
role - which is a member of ' Everybody ' - is primary . Finally , a specific
role can be primary if one exists for the user ' s given credentials , that
will automatically always be a member of ' Authenticated ' , and by extension ,
of ' Everybody ' .
@ param primaryRoleName : a unicode string identifying the role to be
retrieved . This corresponds to L { Role } ' s externalID attribute .
@ param createIfNotFound : a boolean . If True , create a role for the given
primary role name if no exact match is found . The default , False , will
instead retrieve the ' nearest match ' role , which can be Authenticated or
Everybody depending on whether the user is logged in or not .
@ return : a L { Role } .""" | if not primaryRoleName :
return getEveryoneRole ( store )
ff = store . findUnique ( Role , Role . externalID == primaryRoleName , default = None )
if ff is not None :
return ff
authRole = getAuthenticatedRole ( store )
if createIfNotFound :
role = Role ( store = store , externalID = primaryRoleName )
role . becomeMemberOf ( authRole )
return role
return authRole |
def get_source_link ( file , line , display_text = "[source]" , ** kwargs ) -> str :
"Returns github link for given file" | link = f"{SOURCE_URL}{file}#L{line}"
if display_text is None :
return link
return f'<a href="{link}" class="source_link" style="float:right">{display_text}</a>' |
def proxy ( self ) :
"""Return a Deferred that will result in a proxy object in the future .""" | d = Deferred ( self . loop )
self . _proxy_deferreds . append ( d )
if self . _proxy :
d . callback ( self . _proxy )
return d |
def postIncidents ( self , name , message , status , visible , ** kwargs ) :
'''Create a new incident .
: param name : Name of the incident
: param message : A message ( supporting Markdown ) to explain more .
: param status : Status of the incident .
: param visible : Whether the incident is publicly visible .
: param component _ id : ( optional ) Component to update .
: param component _ status : ( optional ) The status to update the given component with .
: param notify : ( optional ) Whether to notify subscribers .
: return : : class : ` Response < Response > ` object
: rtype : requests . Response''' | kwargs [ 'name' ] = name
kwargs [ 'message' ] = message
kwargs [ 'status' ] = status
kwargs [ 'visible' ] = visible
return self . __postRequest ( '/incidents' , kwargs ) |
def wait_for_event ( event ) :
"""Wraps a win32 event into a ` Future ` and wait for it .""" | f = Future ( )
def ready ( ) :
get_event_loop ( ) . remove_win32_handle ( event )
f . set_result ( None )
get_event_loop ( ) . add_win32_handle ( event , ready )
return f |
def _extract_translations ( self , domains ) :
"""Extract the translations into ` . pot ` files""" | for domain , options in domains . items ( ) : # Create the extractor
extractor = babel_frontend . extract_messages ( )
extractor . initialize_options ( )
# The temporary location to write the ` . pot ` file
extractor . output_file = options [ 'pot' ]
# Add the comments marked with ' tn : ' to the translation file for translators to read . Strip the marker .
extractor . add_comments = [ 'tn:' ]
extractor . strip_comments = True
# The directory where the sources for this domain are located
extractor . input_paths = [ options [ 'source' ] ]
# Pass the metadata to the translator
extractor . msgid_bugs_address = self . manager . args . contact
extractor . copyright_holder = self . manager . args . copyright
extractor . version = self . manager . args . version
extractor . project = self . manager . args . project
extractor . finalize_options ( )
# Add keywords for lazy translation functions , based on their non - lazy variants
extractor . keywords . update ( { 'gettext_lazy' : extractor . keywords [ 'gettext' ] , 'ngettext_lazy' : extractor . keywords [ 'ngettext' ] , '__' : extractor . keywords [ 'gettext' ] , # double underscore for lazy
} )
# Do the extraction
_run_babel_command ( extractor ) |
def _integrate_cvode ( self , * args , ** kwargs ) :
"""Do not use directly ( use ` ` integrate ( . . . , integrator = ' cvode ' ) ` ` ) .
Uses CVode from CVodes in
`SUNDIALS <https://computation.llnl.gov/casc/sundials/>`_
(via `pycvodes <https://pypi.python.org/pypi/pycvodes>`_)
to integrate the ODE system .""" | import pycvodes
# Python interface to SUNDIALS ' s cvodes integrators
kwargs [ 'with_jacobian' ] = kwargs . get ( 'method' , 'bdf' ) in pycvodes . requires_jac
if 'lband' in kwargs or 'uband' in kwargs or 'band' in kwargs :
raise ValueError ( "lband and uband set locally (set at" " initialization instead)" )
if self . band is not None :
kwargs [ 'lband' ] , kwargs [ 'uband' ] = self . band
kwargs [ 'autonomous_exprs' ] = self . autonomous_exprs
return self . _integrate ( pycvodes . integrate_adaptive , pycvodes . integrate_predefined , * args , ** kwargs ) |
def validate ( self ) :
'''Perform integrity checks on the modes in this document .
Returns :
None''' | for r in self . roots :
refs = r . references ( )
check_integrity ( refs ) |
def concentric_circle ( center , radius , size = None ) :
"""Draws a circle with the given center and radius .
This is designed to ensure that concentric circles with integer radii are " airtight " ,
i . e . there are not unfilled pixels between them .
: param center : The ( x , y ) coordinates of the center of the circle
: param radius :
:param size: If not None, the size of the image. This is used to skip pixels that are out of bounds.
: return : This is a generator that yields ( x , y ) coordinates of the circle one at a time""" | c_out = bresenham_circle_octant ( radius + 1 )
c_in = bresenham_circle_octant ( radius )
coords = [ ]
# note that in this loop , y also serves as the array index ,
# since it starts at 0 and increments each element .
for x , y in c_in :
for x1 in range ( x , c_out [ y ] [ 0 ] ) :
coords . append ( ( x1 , y ) )
# copy octant 8 times to get other pixels
# TODO might recount pixels where x = = y
next_octant = [ ( y , x ) for x , y in reversed ( coords ) ]
coords . extend ( next_octant )
next_quadrant = [ ( - y , x ) for x , y in coords ]
coords . extend ( next_quadrant )
next_half = [ ( - x , - y ) for x , y in coords ]
coords . extend ( next_half )
for x , y in coords :
c = x + center [ 0 ] , y + center [ 1 ]
if size is not None :
if not in_bounds ( ( 0 , 0 ) , size , c ) :
continue
yield c |
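A usage sketch (relies on the module's bresenham_circle_octant and in_bounds helpers, so the radii and bounds here are illustrative):

filled = set()
for r in range(1, 5):
    # stacking rings of consecutive integer radii fills a disc with no gaps
    filled.update(concentric_circle((10, 10), r, size=(21, 21)))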
def _lemmatise_contractions ( self , f , * args , ** kwargs ) :
"""Lemmatise un mot f avec sa contraction
: param f : Mot à lemmatiser
: yield : Match formated like in _ lemmatise ( )""" | fd = f
for contraction , decontraction in self . _contractions . items ( ) :
if fd . endswith ( contraction ) :
fd = f [ : - len ( contraction ) ]
if "v" in fd or "V" in fd :
fd += decontraction
else :
fd += deramise ( decontraction )
yield from self . _lemmatise ( fd , * args , ** kwargs ) |
def decode ( s ) :
"""Decode a string using the system encoding if needed ( ie byte strings )""" | if isinstance ( s , bytes ) :
return s . decode ( sys . getdefaultencoding ( ) )
else :
return s |
def get_signature ( func ) :
"""Gathers information about the call signature of ` func ` .""" | code = func . __code__
# Names of regular parameters
parameters = tuple ( code . co_varnames [ : code . co_argcount ] )
# Flags
has_varargs = bool ( code . co_flags & inspect . CO_VARARGS )
has_varkw = bool ( code . co_flags & inspect . CO_VARKEYWORDS )
has_kwonly = bool ( code . co_kwonlyargcount )
# A mapping of parameter names to default values
default_values = func . __defaults__ or ( )
defaults = dict ( zip ( parameters [ - len ( default_values ) : ] , default_values ) )
# Type annotations for all parameters
type_hints = typing . get_type_hints ( func ) if typing else func . __annotations__
types = tuple ( normalize_type ( type_hints . get ( param , AnyType ) ) for param in parameters )
# Type annotations for required parameters
required = types [ : - len ( defaults ) ] if defaults else types
# Complexity
complexity = tuple ( map ( type_complexity , types ) ) if typing else None
return Signature ( parameters , types , complexity , defaults , required , has_varargs , has_varkw , has_kwonly ) |
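An illustrative call, assuming the module's Signature result type exposes its fields by name (e.g. a namedtuple):

def sample(a, b=1, *args, **kwargs):
    pass

sig = get_signature(sample)
assert sig.parameters == ('a', 'b')
assert sig.defaults == {'b': 1}
assert sig.has_varargs and sig.has_varkw and not sig.has_kwonly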
def _hook_decorator ( self , addr , length = 0 , kwargs = None ) :
"""Return a function decorator that allows easy hooking . Please refer to hook ( ) for its usage .
: return : The function decorator .""" | def hook_decorator ( func ) :
self . hook ( addr , func , length = length , kwargs = kwargs )
return func
return hook_decorator |
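A hedged usage sketch: the decorator registers func as a hook at addr and returns it unchanged, so it composes naturally with def (project and the address below are placeholders):

@project._hook_decorator(0x400000, length=5)
def print_hello(state):
    print("hello from 0x400000")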
def full_analysis ( self , ncpus = 1 , ** kwargs ) :
"""Perform a full structural analysis of a molecule .
This invokes other methods :
1 . : attr : ` molecular _ weight ( ) `
2 . : attr : ` calculate _ centre _ of _ mass ( ) `
3 . : attr : ` calculate _ maximum _ diameter ( ) `
4 . : attr : ` calculate _ average _ diameter ( ) `
5 . : attr : ` calculate _ pore _ diameter ( ) `
6 . : attr : ` calculate _ pore _ volume ( ) `
7 . : attr : ` calculate _ pore _ diameter _ opt ( ) `
8 . : attr : ` calculate _ pore _ volume _ opt ( ) `
9 . : attr : ` calculate _ windows ( ) `
Parameters
ncpus : : class : ` int `
Number of CPUs used for the parallelised parts of
: func : ` pywindow . utilities . find _ windows ( ) ` . ( default = 1 = serial )
Returns
: attr : ` Molecule . properties `
The updated : attr : ` Molecule . properties ` with returns of all
used methods .""" | self . molecular_weight ( )
self . calculate_centre_of_mass ( )
self . calculate_maximum_diameter ( )
self . calculate_average_diameter ( )
self . calculate_pore_diameter ( )
self . calculate_pore_volume ( )
self . calculate_pore_diameter_opt ( ** kwargs )
self . calculate_pore_volume_opt ( ** kwargs )
self . calculate_windows ( ncpus = ncpus , ** kwargs )
# self . _ circumcircle ( * * kwargs )
return self . properties |
def organizations_search ( self , external_id = None , ** kwargs ) :
"https : / / developer . zendesk . com / rest _ api / docs / core / organizations # search - organizations - by - external - id" | api_path = "/api/v2/organizations/search.json"
api_query = { }
if "query" in kwargs . keys ( ) :
api_query . update ( kwargs [ "query" ] )
del kwargs [ "query" ]
if external_id :
api_query . update ( { "external_id" : external_id , } )
return self . call ( api_path , query = api_query , ** kwargs ) |
def filter_unnecessary_ports ( query , device_owners = None , vnic_type = None , active = True ) :
"""Filter out all ports are not needed on CVX""" | query = ( query . filter_unbound_ports ( ) . filter_by_device_owner ( device_owners ) . filter_by_device_id ( ) . filter_unmanaged_physnets ( ) )
if active :
query = query . filter_inactive_ports ( )
if vnic_type :
query = query . filter_by_vnic_type ( vnic_type )
return query |
def post_event_publish ( self , id , ** data ) :
"""POST / events / : id / publish /
Publishes an event if it has not already been deleted . In order for publish to be permitted , the event must have all
necessary information , including a name and description , an organizer , at least one ticket , and valid payment options .
This API endpoint will return argument errors for event fields that fail to validate the publish requirements . Returns
a boolean indicating success or failure of the publish .
field _ error event . name MISSING
Your event must have a name to be published .
field _ error event . start MISSING
Your event must have a start date to be published .
field _ error event . end MISSING
Your event must have an end date to be published .
field _ error event . start . timezone MISSING
Your event start and end dates must have matching time zones to be published .
field _ error event . organizer MISSING
Your event must have an organizer to be published .
field _ error event . currency MISSING
Your event must have a currency to be published .
field _ error event . currency INVALID
Your event must have a valid currency to be published .
field _ error event . tickets MISSING
Your event must have at least one ticket to be published .
field _ error event . tickets . N . name MISSING
All tickets must have names in order for your event to be published . The N will be the ticket class ID with the
error .
field _ error event . tickets . N . quantity _ total MISSING
All non - donation tickets must have an available quantity value in order for your event to be published . The N
will be the ticket class ID with the error .
field _ error event . tickets . N . cost MISSING
All non - donation tickets must have a cost ( which can be ` ` 0.00 ` ` for free tickets ) in order for your event to
be published . The N will be the ticket class ID with the error .""" | return self . post ( "/events/{0}/publish/" . format ( id ) , data = data ) |
def _check_contigs_to_use ( self , ref_dict ) :
'''Checks that the set of contigs to use are all in the reference
fasta lengths dict made by self . _ get _ ref _ lengths ( )''' | if self . contigs_to_use is None :
return True
for contig in self . contigs_to_use :
if contig not in ref_dict :
raise Error ( 'Requested to use contig "' + contig + '", but not found in input BAM file "' + self . bam + '"' )
return True |
def refreshUi ( self ) :
"""Matches the UI state to the current cursor positioning .""" | font = self . currentFont ( )
for name in ( 'underline' , 'bold' , 'italic' , 'strikeOut' ) :
getter = getattr ( font , name )
act = self . _actions [ name ]
act . blockSignals ( True )
act . setChecked ( getter ( ) )
act . blockSignals ( False ) |
def del_properties ( self , properties , recursive = None ) :
"""Delete properties listed in properties
properties - iterable containing the property names to delete. If it is a
str it will be cast to a tuple.
recursive - on folders property attachment is recursive by default . It is
possible to force recursive behavior .""" | return self . _accessor . del_properties ( self , properties , recursive ) |
def get_sun_rise_set_transit ( self , times , method = 'pyephem' , ** kwargs ) :
"""Calculate sunrise , sunset and transit times .
Parameters
times : DatetimeIndex
Must be localized to the Location
method : str , default ' pyephem '
' pyephem ' , ' spa ' , or ' geometric '
kwargs are passed to the relevant functions . See
solarposition . sun _ rise _ set _ transit _ < method > for details .
Returns
result : DataFrame
Column names are : ` ` sunrise , sunset , transit ` ` .""" | if method == 'pyephem' :
result = solarposition . sun_rise_set_transit_ephem ( times , self . latitude , self . longitude , ** kwargs )
elif method == 'spa' :
result = solarposition . sun_rise_set_transit_spa ( times , self . latitude , self . longitude , ** kwargs )
elif method == 'geometric' :
sr , ss , tr = solarposition . sun_rise_set_transit_geometric ( times , self . latitude , self . longitude , ** kwargs )
result = pd . DataFrame ( index = times , data = { 'sunrise' : sr , 'sunset' : ss , 'transit' : tr } )
else :
raise ValueError ( '{} is not a valid method. Must be ' 'one of pyephem, spa, geometric' . format ( method ) )
return result |
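A hedged usage sketch with pvlib-style objects (location is assumed to be a Location instance; the dates are illustrative). Note that times must be tz-localized:

import pandas as pd

times = pd.date_range('2019-06-01', periods=3, freq='D', tz='Europe/Berlin')
events = location.get_sun_rise_set_transit(times, method='spa')
print(events[['sunrise', 'sunset']])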
def parse_obj ( obj ) :
"""> > > parse _ obj ( ' bucket / key ' )
( ' bucket ' , ' key ' )
> > > parse _ obj ( ' my - bucket / path / to / file . txt ' )
( ' my - bucket ' , ' path / to / file . txt ' )
> > > parse _ obj ( ' s3 : / / this _ bucket / some / path . txt ' )
( ' this _ bucket ' , ' some / path . txt ' )
> > > parse _ obj ( ' https : / / s3 . amazonaws . com / bucket / file . txt ' )
( ' bucket ' , ' file . txt ' )
> > > parse _ obj ( ' http : / / the - bucket . s3 . amazonaws . com / the / file . txt ' )
( ' the - bucket ' , ' the / file . txt ' )""" | obj = obj . lstrip ( 's3://' )
if obj . startswith ( 'http' ) :
url = urlparse . urlparse ( obj )
if url . netloc == 's3.amazonaws.com' :
path = url . path [ 1 : ]
# remove leading slash
bucket , key = path . split ( '/' , 1 )
else : # bucket . s3 . amazonaws . com form
bucket = url . netloc . split ( '.' , 1 ) [ 0 ]
key = url . path [ 1 : ]
else :
bucket , key = obj . split ( '/' , 1 )
return bucket , key |
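The fix above replaces the original lstrip('s3://') call, which removes a character set rather than a prefix and would corrupt bucket names starting with any of those characters; a quick demonstration of the pitfall:

assert 's3://my-bucket/key'.lstrip('s3://') == 'my-bucket/key'   # happens to look right...
assert 'stats-bucket/key'.lstrip('s3://') == 'tats-bucket/key'   # ...but eats the leading 's'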
def convert_time ( self , time ) :
"""A helper function to convert seconds into hh : mm : ss for better
readability .
time : A string representing time in seconds .""" | time_string = str ( datetime . timedelta ( seconds = int ( time ) ) )
if time_string . split ( ':' ) [ 0 ] == '0' :
time_string = time_string . partition ( ':' ) [ 2 ]
return time_string |
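Worked examples, treating the method as a plain function (self is unused):

assert convert_time(None, 90) == '01:30'      # hours dropped when zero
assert convert_time(None, 3661) == '1:01:01'  # h:mm:ss otherwise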
def get_by_email ( cls , email ) :
"""Return a User by email address""" | return cls . query ( ) . filter ( cls . email == email ) . first ( ) |
def _find_matching_collections_internally ( collections , record ) :
"""Find matching collections with internal engine .
: param collections : set of collections where search
: param record : record to match""" | for name , data in iteritems ( collections ) :
if _build_query ( data [ 'query' ] ) . match ( record ) :
yield data [ 'ancestors' ]
return # PEP 479 : raising StopIteration inside a generator is a RuntimeError since Python 3.7
def items ( self , path = None ) :
"""Returns set of items .
: param path : Regex filter on item path .
: return : List of Item class objects .""" | items = list ( self . iteritems ( ) )
if path is not None :
path += '$'
regex = re . compile ( path )
items = [ i for i in items if regex . match ( i . path ) ]
return items |
def onSave ( self , event , alert = False , destroy = True ) :
"""Save grid data""" | # tidy up drop _ down menu
if self . drop_down_menu :
self . drop_down_menu . clean_up ( )
# then save actual data
self . grid_builder . save_grid_data ( )
if not event and not alert :
return
# then alert user
wx . MessageBox ( 'Saved!' , 'Info' , style = wx . OK | wx . ICON_INFORMATION )
if destroy :
self . Destroy ( ) |
def autocmds ( namespace = None , args = None , command_suffix = '_command' , add_dry_run_option = True , add_verbosity_option = True ) :
"""Parse and run commands .
Will search ` ` namespace ` ` for functions that end with ` ` command _ suffix ` ` .
: param namespace : the namespace / module to search for commands
: param args : the arguments for the command parser . defaults to
: data : ` sys . argv `
: param command _ suffix : function name suffix that indicates that a
function is a command .""" | if namespace is None :
namespace = inspect . currentframe ( ) . f_back . f_globals
elif type ( namespace ) is types . ModuleType :
namespace = namespace . __dict__
if args is None :
args = sys . argv
if len ( args ) < 2 or args [ 1 ] in ( '-h' , '--help' ) :
print_help ( namespace , command_suffix )
return
command_name = args . pop ( 1 ) . replace ( '-' , '_' )
function = namespace [ command_name + command_suffix ]
parse_and_run_function ( function , args , command_name , add_dry_run_option = add_dry_run_option , add_verbosity_option = add_verbosity_option ) |
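A hedged sketch of the calling convention: any function in the invoking module whose name ends with the suffix becomes a subcommand (the command below is illustrative):

def greet_command(name):
    """Print a greeting."""
    print('hello,', name)

if __name__ == '__main__':
    autocmds()  # e.g. `python script.py greet Alice`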
def difference ( self , * others ) :
r"""Return a new multiset with all elements from the others removed .
>>> ms = Multiset('aab')
>>> sorted(ms.difference('bc'))
['a', 'a']
You can also use the ``-`` operator for the same effect. However, the operator version
will only accept a set as the other operand, not any iterable, to avoid errors.
>>> ms = Multiset('aabbbc')
>>> sorted(ms - Multiset('abd'))
['a', 'b', 'b', 'c']
For a variant of the operation which modifies the multiset in place see
: meth : ` difference _ update ` .
Args :
others : The other sets to remove from the multiset . Can also be any : class : ` ~ typing . Iterable ` \ [ ~ T ]
or : class : ` ~ typing . Mapping ` \ [ ~ T , : class : ` int ` ] which are then converted to : class : ` Multiset ` \ [ ~ T ] .
Returns :
The resulting difference multiset .""" | result = self . __copy__ ( )
_elements = result . _elements
_total = result . _total
for other in map ( self . _as_multiset , others ) :
for element , multiplicity in other . items ( ) :
if element in _elements :
old_multiplicity = _elements [ element ]
new_multiplicity = old_multiplicity - multiplicity
if new_multiplicity > 0 :
_elements [ element ] = new_multiplicity
_total -= multiplicity
else :
del _elements [ element ]
_total -= old_multiplicity
result . _total = _total
return result |